Diffstat (limited to 'arch/xtensa')
35 files changed, 670 insertions, 1633 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index c1e69a1f92a..2e74cb0b780 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -34,31 +34,24 @@ config GENERIC_HARDIRQS
 	bool
 	default y
 
+config ARCH_HAS_ILOG2_U32
+	bool
+	default n
+
+config ARCH_HAS_ILOG2_U64
+	bool
+	default n
+
 source "init/Kconfig"
 
 menu "Processor type and features"
 
 choice
 	prompt "Xtensa Processor Configuration"
-	default XTENSA_CPU_LINUX_BE
+	default XTENSA_VARIANT_FSF
 
-config XTENSA_CPU_LINUX_BE
-	bool "linux_be"
-	---help---
-	  The linux_be processor configuration is the baseline Xtensa
-	  configurations included in this kernel and also used by
-	  binutils, gcc, and gdb. It contains no TIE, no coprocessors,
-	  and the following configuration options:
-
-	  Code Density Option             2 Misc Special Registers
-	  NSA/NSAU Instructions           128-bit Data Bus Width
-	  Processor ID                    8K, 2-way I and D Caches
-	  Zero-Overhead Loops             2 Inst Address Break Registers
-	  Big Endian                      2 Data Address Break Registers
-	  64 General-Purpose Registers    JTAG Interface and Trace Port
-	  17 Interrupts                   MMU w/ TLBs and Autorefill
-	  3 Interrupt Levels              8 Autorefill Ways (I/D TLBs)
-	  3 Timers                        Unaligned Exceptions
+config XTENSA_VARIANT_FSF
+	bool "fsf"
 
 endchoice
 
 config MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 3a3a4c66ef8..95f836db38f 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -11,13 +11,13 @@
 # this architecture
 
 # Core configuration.
-# (Use CPU=<xtensa_config> to use another default compiler.)
+# (Use VAR=<xtensa_config> to use another default compiler.)
 
-cpu-$(CONFIG_XTENSA_CPU_LINUX_BE)	:= linux_be
-cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM)	:= linux_custom
+variant-$(CONFIG_XTENSA_VARIANT_FSF)		:= fsf
+variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM)	:= custom
 
-CPU = $(cpu-y)
-export CPU
+VARIANT = $(variant-y)
+export VARIANT
 
 # Platform configuration
 
@@ -27,8 +27,6 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS)	:= iss
 PLATFORM = $(platform-y)
 export PLATFORM
 
-CPPFLAGS	+= $(if $(KBUILD_SRC),-I$(srctree)/include/asm-xtensa/)
-CPPFLAGS	+= -Iinclude/asm
 CFLAGS		+= -pipe -mlongcalls
 
 KBUILD_DEFCONFIG := iss_defconfig
@@ -41,12 +39,12 @@ core-$(CONFIG_EMBEDDED_RAMDISK)	+= arch/xtensa/boot/ramdisk/
 
 # Test for cross compiling
 
-ifneq ($(CPU),)
+ifneq ($(VARIANT),)
   COMPILE_ARCH = $(shell uname -m)
 
   ifneq ($(COMPILE_ARCH), xtensa)
     ifndef CROSS_COMPILE
-      CROSS_COMPILE = xtensa_$(CPU)-
+      CROSS_COMPILE = xtensa_$(VARIANT)-
     endif
   endif
 endif
@@ -68,14 +66,13 @@ archinc		:= include/asm-xtensa
 
 archprepare: $(archinc)/.platform
 
-# Update machine cpu and platform symlinks if something which affects
+# Update processor variant and platform symlinks if something which affects
 # them changed.
 
 $(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
-	@echo '  SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
+	@echo '  SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
 	$(Q)mkdir -p $(archinc)
-	$(Q)mkdir -p $(archinc)/xtensa
-	$(Q)ln -fsn $(srctree)/$(archinc)/xtensa/config-$(CPU) $(archinc)/xtensa/config
+	$(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
 	@echo '  SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
 	$(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
 	@touch $@
@@ -89,7 +86,7 @@ zImage zImage.initrd: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
 CLEAN_FILES	+= arch/xtensa/vmlinux.lds \
-		   $(archinc)/platform $(archinc)/xtensa/config \
+		   $(archinc)/platform $(archinc)/variant \
 		   $(archinc)/.platform
 
 define archhelp
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index f857fc760aa..464298bc348 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,7 +1,4 @@
-#include <xtensa/config/specreg.h>
-#include <xtensa/config/core.h>
-
 #include <asm/bootparam.h>
 
 
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index ee636b0da81..84848123e2a 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -1,9 +1,7 @@
-
-#define _ASMLANGUAGE
-#include <xtensa/config/specreg.h>
-#include <xtensa/config/core.h>
-#include <xtensa/cacheasm.h>
-
+#include <asm/variant/core.h>
+#include <asm/regs.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
 /*
  * RB-Data: RedBoot data/bss
  * P:	    Boot-Parameters
@@ -77,8 +75,14 @@ _start:
 	/* Note: The assembler cannot relax "addi a0, a0, ..." to an
 	   l32r, so we load to a4 first. */
 
-	addi	a4, a0, __start - __start_a0
-	mov	a0, a4
+	# addi	a4, a0, __start - __start_a0
+	# mov	a0, a4
+
+	movi	a4, __start
+	movi	a5, __start_a0
+	add	a4, a0, a4
+	sub	a0, a4, a5
+
 	movi	a4, __start
 	movi	a5, __reloc_end
 
@@ -106,9 +110,13 @@ _start:
 	/* We have to flush and invalidate the caches here before we jump.
 	 */
 
 #if XCHAL_DCACHE_IS_WRITEBACK
-	dcache_writeback_all	a5, a6
+
+	___flush_dcache_all a5 a6
+
 #endif
-	icache_invalidate_all	a5, a6
+
+	___invalidate_icache_all a5 a6
+
 	isync
 
 	movi	a11, _reloc
 	jx	a11
@@ -209,9 +217,14 @@ _reloc:
 	/* jump to the kernel */
 2:
 #if XCHAL_DCACHE_IS_WRITEBACK
-	dcache_writeback_all	a5, a6
+
+	___flush_dcache_all a5 a6
+
 #endif
-	icache_invalidate_all	a5, a6
+
+	___invalidate_icache_all a5 a6
+
+	isync
 
 	movi	a5, __start
 	movi	a3, boot_initrd_start
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 802621dd486..f19854035e6 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -53,11 +53,7 @@ CONFIG_CC_ALIGN_JUMPS=0
 #
 # Processor type and features
 #
-CONFIG_XTENSA_ARCH_LINUX_BE=y
-# CONFIG_XTENSA_ARCH_LINUX_LE is not set
-# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
-# CONFIG_XTENSA_ARCH_S5 is not set
-# CONFIG_XTENSA_CUSTOM is not set
+CONFIG_XTENSA_VARIANT_FSF=y
 CONFIG_MMU=y
 # CONFIG_XTENSA_UNALIGNED_USER is not set
 # CONFIG_PREEMPT is not set
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index a4956578a24..33d6e9d2e83 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -16,14 +16,9 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/ptrace.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/thread_info.h>
 
 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
 
@@ -216,7 +211,7 @@ ENTRY(fast_unaligned)
 	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble
 
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
 	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
 	addi	a6, a5, -OP0_S32I_N
 	_beqz	a6, .Lstore		# S32I.N, do a store
@@ -251,7 +246,7 @@ ENTRY(fast_unaligned)
 #endif
 	__src_b	a3, a5, a6		# a3 has the data word
 
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
 	addi	a7, a7, 2		# increment PC (assume 16-bit insn)
 
 	extui	a5, a4, INSN_OP0, 4
@@ -279,14 +274,14 @@ ENTRY(fast_unaligned)
 
 1:
 
-#if XCHAL_HAVE_LOOP
-	rsr	a3, LEND		# check if we reached LEND
-	bne	a7, a3, 1f
-	rsr	a3, LCOUNT		# and LCOUNT != 0
-	beqz	a3, 1f
-	addi	a3, a3, -1		# decrement LCOUNT and set
+#if XCHAL_HAVE_LOOPS
+	rsr	a5, LEND		# check if we reached LEND
+	bne	a7, a5, 1f
+	rsr	a5, LCOUNT		# and LCOUNT != 0
+	beqz	a5, 1f
+	addi	a5, a5, -1		# decrement LCOUNT and set
 	rsr	a7, LBEG		# set PC to LBEGIN
-	wsr	a3, LCOUNT
+	wsr	a5, LCOUNT
 #endif
 
 1:	wsr	a7, EPC_1		# skip load instruction
@@ -336,7 +331,7 @@ ENTRY(fast_unaligned)
 
 	movi	a6, 0			# mask: ffffffff:00000000
 
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
 	addi	a7, a7, 2		# incr. PC,assume 16-bit instruction
 
 	extui	a5, a4, INSN_OP0, 4	# extract OP0
@@ -359,14 +354,14 @@ ENTRY(fast_unaligned)
 
 	/* Get memory address */
 
 1:
-#if XCHAL_HAVE_LOOP
-	rsr	a3, LEND		# check if we reached LEND
-	bne	a7, a3, 1f
-	rsr	a3, LCOUNT		# and LCOUNT != 0
-	beqz	a3, 1f
-	addi	a3, a3, -1		# decrement LCOUNT and set
+#if XCHAL_HAVE_LOOPS
+	rsr	a4, LEND		# check if we reached LEND
+	bne	a7, a4, 1f
+	rsr	a4, LCOUNT		# and LCOUNT != 0
+	beqz	a4, 1f
+	addi	a4, a4, -1		# decrement LCOUNT and set
 	rsr	a7, LBEG		# set PC to LBEGIN
-	wsr	a3, LCOUNT
+	wsr	a4, LCOUNT
 #endif
 
 1:	wsr	a7, EPC_1		# skip store instruction
@@ -416,6 +411,7 @@ ENTRY(fast_unaligned)
 
 	/* Restore working register */
 
+	l32i	a8, a2, PT_AREG8
 	l32i	a7, a2, PT_AREG7
 	l32i	a6, a2, PT_AREG6
 	l32i	a5, a2, PT_AREG5
@@ -446,7 +442,7 @@ ENTRY(fast_unaligned)
 	mov	a1, a2
 
 	rsr	a0, PS
-	bbsi.l  a2, PS_UM_SHIFT, 1f	# jump if user mode
+	bbsi.l  a2, PS_UM_BIT, 1f	# jump if user mode
 
 	movi	a0, _kernel_exception
 	jx	a0
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7cd1d7f8f60..b256cfbef34 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -87,6 +87,11 @@ int main(void)
   DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
   BLANK();
   DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
+
+  /* constants */
+  DEFINE(_CLONE_VM, CLONE_VM);
+  DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+
   return 0;
 }
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index cf5a93fb6a2..01bcb9fcfcb 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -90,7 +90,6 @@ ENTRY(enable_coprocessor)
 	rsync
 	retw
 
-#endif
 
 ENTRY(save_coprocessor_extra)
 	entry	sp, 16
@@ -197,4 +196,5 @@ _xtensa_reginfo_tables:
 	XCHAL_CP7_SA_CONTENTS_LIBDB
 	.word	0xFC000000	/* invalid register number,marks end of table*/
 _xtensa_reginfo_table_end:
+#endif
 
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 89e409e9e0d..9e271ba009b 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -24,7 +24,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/signal.h>
-#include <xtensa/coreasm.h>
+#include <asm/tlbflush.h>
 
 /* Unimplemented features. */
 
@@ -364,7 +364,7 @@ common_exception:
 	movi	a2, 1
 	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
 	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
-	movi	a2, PS_WOE_MASK
+	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
 	rsr	a0, EXCCAUSE
 	xsr	a3, PS
@@ -399,7 +399,7 @@ common_exception_return:
 	/* Jump if we are returning from kernel exceptions. */
 
 1:	l32i	a3, a1, PT_PS
-	_bbsi.l	a3, PS_UM_SHIFT, 2f
+	_bbsi.l	a3, PS_UM_BIT, 2f
 	j	kernel_exception_exit
 
 	/* Specific to a user exception exit:
@@ -422,7 +422,7 @@ common_exception_return:
 	 * (Hint: There is only one user exception frame on stack)
 	 */
 
-	movi	a3, PS_WOE_MASK
+	movi	a3, 1 << PS_WOE_BIT
 
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
 	_bbci.l	a4, TIF_SIGPENDING, 4f
@@ -694,7 +694,7 @@ common_exception_exit:
 ENTRY(debug_exception)
 
 	rsr	a0, EPS + XCHAL_DEBUGLEVEL
-	bbsi.l	a0, PS_EXCM_SHIFT, 1f	# exception mode
+	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
 
 	/* Set EPC_1 and EXCCAUSE */
 
@@ -707,7 +707,7 @@ ENTRY(debug_exception)
 
 	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
 
-	movi	a2, 1 << PS_EXCM_SHIFT
+	movi	a2, 1 << PS_EXCM_BIT
 	or	a2, a0, a2
 	movi	a0, debug_exception	# restore a3, debug jump vector
 	wsr	a2, PS
@@ -715,7 +715,7 @@ ENTRY(debug_exception)
 
 	/* Switch to kernel/user stack, restore jump vector, and save a0 */
 
-	bbsi.l	a2, PS_UM_SHIFT, 2f	# jump if user mode
+	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
 
 	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
 	s32i	a0, a2, PT_AREG0
@@ -778,7 +778,7 @@ ENTRY(unrecoverable_exception)
 	wsr	a1, WINDOWBASE
 	rsync
 
-	movi	a1, PS_WOE_MASK | 1
+	movi	a1, (1 << PS_WOE_BIT) | 1
 	wsr	a1, PS
 	rsync
 
@@ -1004,13 +1004,10 @@ ENTRY(fast_syscall_kernel)
 
 	rsr	a0, DEPC			# get syscall-nr
 	_beqz	a0, fast_syscall_spill_registers
-
-	addi	a0, a0, -__NR_sysxtensa
-	_beqz	a0, fast_syscall_sysxtensa
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
 	j	kernel_exception
 
-
 ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
@@ -1024,9 +1021,7 @@ ENTRY(fast_syscall_user)
 
 	rsr	a0, DEPC			# get syscall-nr
 	_beqz	a0, fast_syscall_spill_registers
-
-	addi	a0, a0, -__NR_sysxtensa
-	_beqz	a0, fast_syscall_sysxtensa
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
 	j	user_exception
 
@@ -1047,18 +1042,19 @@ ENTRY(fast_syscall_unrecoverable)
 /*
  * sysxtensa syscall handler
  *
- * int sysxtensa (XTENSA_ATOMIC_SET, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_ADD, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
- *        a2            a6              a3    a4      a5
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ *        a2            a6                   a3    a4      a5
  *
  * Entry condition:
  *
- * a0:	trashed, original value saved on stack (PT_AREG0)
+ * a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
  * a1:	a1
- * a2:	new stack pointer, original in DEPC
- * a3:	dispatch table
+ * a2:	new stack pointer, original in a0 and DEPC
+ * a3:	dispatch table, original in excsave_1
+ * a4..a15:	unchanged
  * depc:	a2, original value saved on stack (PT_DEPC)
  * excsave_1:	a3
  *
@@ -1091,59 +1087,62 @@ ENTRY(fast_syscall_unrecoverable)
 #define CATCH \
 67:
 
-ENTRY(fast_syscall_sysxtensa)
-
-	_beqz	a6, 1f
-	_blti	a6, SYSXTENSA_COUNT, 2f
+ENTRY(fast_syscall_xtensa)
 
-1:	j	user_exception
-
-2:	xsr	a3, EXCSAVE_1		# restore a3, excsave1
-	s32i	a7, a2, PT_AREG7
+	xsr	a3, EXCSAVE_1		# restore a3, excsave1
+	s32i	a7, a2, PT_AREG7	# we need an additional register
 	movi	a7, 4			# sizeof(unsigned int)
-	access_ok a0, a3, a7, a2, .Leac
+	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
 
-	_beqi	a6, SYSXTENSA_ATOMIC_SET, .Lset
-	_beqi	a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
-	_beqi	a6, SYSXTENSA_ATOMIC_ADD, .Ladd
+	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
+	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
+	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
 
-	/* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */
+	/* Fall through for ATOMIC_CMP_SWP. */
 
 .Lswp:	/* Atomic compare and swap */
 
-TRY	l32i	a7, a3, 0		# read old value
-	bne	a7, a4, 1f		# same as old value? jump
-	s32i	a5, a3, 0		# different, modify value
-	movi	a7, 1			# and return 1
-	j	.Lret
-
-1:	movi	a7, 0			# same values: return 0
-	j	.Lret
-
-.Ladd:	/* Atomic add */
-.Lexg:	/* Atomic (exchange) add */
+TRY	l32i	a0, a3, 0		# read old value
+	bne	a0, a4, 1f		# same as old value? jump
+TRY	s32i	a5, a3, 0		# different, modify value
+	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 1			# and return 1
+	addi	a6, a6, 1		# restore a6 (really necessary?)
+	rfe
 
-TRY	l32i	a7, a3, 0
-	add	a4, a4, a7
-	s32i	a4, a3, 0
-	j	.Lret
+1:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 0			# return 0 (note that we cannot set
+	addi	a6, a6, 1		# restore a6 (really necessary?)
+	rfe
 
-.Lset:	/* Atomic set */
+.Lnswp:	/* Atomic set, add, and exg_add. */
 
-TRY	l32i	a7, a3, 0		# read old value as return value
-	s32i	a4, a3, 0		# write new value
+TRY	l32i	a7, a3, 0		# orig
+	add	a0, a4, a7		# + arg
+	moveqz	a0, a4, a6		# set
+TRY	s32i	a0, a3, 0		# write new value
 
-.Lret:	mov	a0, a2
+	mov	a0, a2
 	mov	a2, a7
-	l32i	a7, a0, PT_AREG7
-	l32i	a3, a0, PT_AREG3
-	l32i	a0, a0, PT_AREG0
+	l32i	a7, a0, PT_AREG7	# restore a7
+	l32i	a0, a0, PT_AREG0	# restore a0
+	addi	a6, a6, 1		# restore a6 (really necessary?)
 	rfe
 
 CATCH
-.Leac:	movi	a7, -EFAULT
-	j	.Lret
+.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EFAULT
+	rfe
+
+.Lill:	l32i	a7, a2, PT_AREG0	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EINVAL
+	rfe
+
 
@@ -1491,7 +1490,7 @@ ENTRY(_spill_registers)
 	 */
 
 	rsr	a0, PS
-	_bbci.l	a0, PS_UM_SHIFT, 1f
+	_bbci.l	a0, PS_UM_BIT, 1f
 
 	/* User space: Setup a dummy frame and kill application.
 	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
@@ -1510,7 +1509,7 @@ ENTRY(_spill_registers)
 	l32i	a1, a3, EXC_TABLE_KSTK
 	wsr	a3, EXCSAVE_1
 
-	movi	a4, PS_WOE_MASK | 1
+	movi	a4, (1 << PS_WOE_BIT) | 1
 	wsr	a4, PS
 	rsync
 
@@ -1612,7 +1611,7 @@ ENTRY(fast_second_level_miss)
 	rsr	a1, PTEVADDR
 	srli	a1, a1, PAGE_SHIFT
 	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
-	addi	a1, a1, DTLB_WAY_PGTABLE	# ... + way_number
+	addi	a1, a1, DTLB_WAY_PGD	# ... + way_number
 
 	wdtlb	a0, a1
 	dsync
@@ -1654,7 +1653,7 @@ ENTRY(fast_second_level_miss)
 	mov	a1, a2
 	rsr	a2, PS
-	bbsi.l	a2, PS_UM_SHIFT, 1f
+	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
 
@@ -1753,7 +1752,7 @@ ENTRY(fast_store_prohibited)
 	mov	a1, a2
 	rsr	a2, PS
-	bbsi.l	a2, PS_UM_SHIFT, 1f
+	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
 
@@ -1907,6 +1906,103 @@ ENTRY(fast_coprocessor)
 
 #endif /* XCHAL_EXTRA_SA_SIZE */
 
 /*
+ * System Calls.
+ *
+ * void system_call (struct pt_regs* regs, int exccause)
+ *                            a2                 a3
+ */
+
+ENTRY(system_call)
+	entry	a1, 32
+
+	/* regs->syscall = regs->areg[2] */
+
+	l32i	a3, a2, PT_AREG2
+	mov	a6, a2
+	movi	a4, do_syscall_trace_enter
+	s32i	a3, a2, PT_SYSCALL
+	callx4	a4
+
+	/* syscall = sys_call_table[syscall_nr] */
+
+	movi	a4, sys_call_table;
+	movi	a5, __NR_syscall_count
+	movi	a6, -ENOSYS
+	bgeu	a3, a5, 1f
+
+	addx4	a4, a3, a4
+	l32i	a4, a4, 0
+	movi	a5, sys_ni_syscall;
+	beq	a4, a5, 1f
+
+	/* Load args: arg0 - arg5 are passed via regs. */
+
+	l32i	a6, a2, PT_AREG6
+	l32i	a7, a2, PT_AREG3
+	l32i	a8, a2, PT_AREG4
+	l32i	a9, a2, PT_AREG5
+	l32i	a10, a2, PT_AREG8
+	l32i	a11, a2, PT_AREG9
+
+	/* Pass one additional argument to the syscall: pt_regs (on stack) */
+	s32i	a2, a1, 0
+
+	callx4	a4
+
+1:	/* regs->areg[2] = return_value */
+
+	s32i	a6, a2, PT_AREG2
+	movi	a4, do_syscall_trace_leave
+	mov	a6, a2
+	callx4	a4
+	retw
+
+
+/*
+ * Create a kernel thread
+ *
+ * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ *       a2                   a2                a3             a4
+ */
+
+ENTRY(kernel_thread)
+	entry	a1, 16
+
+	mov	a5, a2			# preserve fn over syscall
+	mov	a7, a3			# preserve args over syscall
+
+	movi	a3, _CLONE_VM | _CLONE_UNTRACED
+	movi	a2, __NR_clone
+	or	a6, a4, a3		# arg0: flags
+	mov	a3, a1			# arg1: sp
+	syscall
+
+	beq	a3, a1, 1f		# branch if parent
+	mov	a6, a7			# args
+	callx4	a5			# fn(args)
+
+	movi	a2, __NR_exit
+	syscall				# return value of fn(args) still in a6
+
+1:	retw
+
+/*
+ * Do a system call from kernel instead of calling sys_execve, so we end up
+ * with proper pt_regs.
+ *
+ * int kernel_execve(const char *fname, char *const argv[], charg *const envp[])
+ *                        a2                   a3                  a4
+ */
+
+ENTRY(kernel_execve)
+	entry	a1, 16
+	mov	a6, a2			# arg0 is in a6
+	movi	a2, __NR_execve
+	syscall
+
+	retw
+
+/*
  * Task switch.
  *
  * struct task*  _switch_to (struct task* prev, struct task* next)
@@ -1924,7 +2020,7 @@ ENTRY(_switch_to)
 
 	/* Disable ints while we manipulate the stack pointer; spill regs. */
 
-	movi	a5, PS_EXCM_MASK | LOCKLEVEL
+	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
 	xsr	a5, PS
 	rsr	a3, EXCSAVE_1
 	rsync
@@ -1964,33 +2060,9 @@ ENTRY(ret_from_fork)
 	movi	a4, schedule_tail
 	callx4	a4
 
-	movi	a4, do_syscall_trace
+	movi	a4, do_syscall_trace_leave
+	mov	a6, a1
 	callx4	a4
 
 	j	common_exception_return
-
-
-/*
- * Table of syscalls
- */
-
-.data
-.align  4
-.global sys_call_table
-sys_call_table:
-
-#define SYSCALL(call, narg) .word call
-#include "syscalls.h"
-
-/*
- * Number of arguments of each syscall
- */
-
-.global sys_narg_table
-sys_narg_table:
-
-#undef  SYSCALL
-#define SYSCALL(call, narg) .byte narg
-#include "syscalls.h"
-
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index c07cb252299..ea89910efa4 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -15,9 +15,9 @@
  *	Kevin Chea
  */
 
-#include <xtensa/cacheasm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
+#include <asm/cacheasm.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -32,13 +32,6 @@
  *
  */
 
-	.macro	iterate	from, to , cmd
-		.ifeq	((\to - \from) & ~0xfff)
-			\cmd	\from
-			iterate	"(\from+1)", \to, \cmd
-		.endif
-	.endm
-
 /*
  * _start
  *
@@ -64,7 +57,7 @@ _startup:
 
 	/* Disable interrupts and exceptions. */
 
-	movi	a0, XCHAL_PS_EXCM_MASK
+	movi	a0, LOCKLEVEL
 	wsr	a0, PS
 
 	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
@@ -91,11 +84,11 @@ _startup:
 	movi	a1, 15
 	wsr	a0, ICOUNTLEVEL
 
-	.macro reset_dbreak num
-	wsr	a0, DBREAKC + \num
-	.endm
-
-	iterate	0, XCHAL_NUM_IBREAK-1, reset_dbreak
+	.set	_index, 0
+	.rept	XCHAL_NUM_DBREAK - 1
+	wsr	a0, DBREAKC + _index
+	.set	_index, _index + 1
+	.endr
 #endif
 
 	/* Clear CCOUNT (not really necessary, but nice) */
@@ -110,10 +103,11 @@ _startup:
 
 	/* Disable all timers. */
 
-	.macro	reset_timer	num
-	wsr	a0, CCOMPARE_0 + \num
-	.endm
-	iterate	0, XCHAL_NUM_TIMERS-1, reset_timer
+	.set	_index, 0
+	.rept	XCHAL_NUM_TIMERS - 1
+	wsr	a0, CCOMPARE + _index
+	.set	_index, _index + 1
+	.endr
 
 	/* Interrupt initialization. */
 
@@ -139,12 +133,21 @@ _startup:
 	rsync
 
 	/*  Initialize the caches.
-	 *  Does not include flushing writeback d-cache.
-	 *  a6, a7 are just working registers (clobbered).
+	 *  a2, a3 are just working registers (clobbered).
 	 */
 
-	icache_reset	a2, a3
-	dcache_reset	a2, a3
+#if XCHAL_DCACHE_LINE_LOCKABLE
+	___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+	___unlock_icache_all a2 a3
+#endif
+
+	___invalidate_dcache_all a2 a3
+	___invalidate_icache_all a2 a3
+
+	isync
 
 	/* Unpack data sections
 	 *
@@ -181,9 +184,9 @@ _startup:
 	movi	a2, _bss_start	# start of BSS
 	movi	a3, _bss_end	# end of BSS
 
-1:	addi	a2, a2, 4
+	__loopt	a2, a3, a4, 2
 	s32i	a0, a2, 0
-	blt	a2, a3, 1b
+	__endla	a2, a4, 4
 
 #if XCHAL_DCACHE_IS_WRITEBACK
 
@@ -191,7 +194,7 @@ _startup:
 	 * instructions/data are available.
 	 */
 
-	dcache_writeback_all	a2, a3
+	___flush_dcache_all	a2 a3
 #endif
 
 	/* Setup stack and enable window exceptions (keep irqs disabled) */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 1cf744ee095..c9ea73b7031 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
  * Xtensa built-in interrupt controller and some generic functions copied
  * from i386.
  *
- * Copyright (C) 2002 - 2005 Tensilica, Inc.
+ * Copyright (C) 2002 - 2006 Tensilica, Inc.
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  *
  *
@@ -22,11 +22,6 @@
 #include <asm/uaccess.h>
 #include <asm/platform.h>