Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--  arch/xtensa/kernel/.gitignore     |    1
-rw-r--r--  arch/xtensa/kernel/Makefile       |   33
-rw-r--r--  arch/xtensa/kernel/align.S        |   73
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c  |   48
-rw-r--r--  arch/xtensa/kernel/coprocessor.S  |  459
-rw-r--r--  arch/xtensa/kernel/entry.S        | 1634
-rw-r--r--  arch/xtensa/kernel/head.S         |  317
-rw-r--r--  arch/xtensa/kernel/irq.c          |  235
-rw-r--r--  arch/xtensa/kernel/mcount.S       |   50
-rw-r--r--  arch/xtensa/kernel/module.c       |  194
-rw-r--r--  arch/xtensa/kernel/mxhead.S       |   85
-rw-r--r--  arch/xtensa/kernel/pci-dma.c      |   49
-rw-r--r--  arch/xtensa/kernel/pci.c          |  213
-rw-r--r--  arch/xtensa/kernel/platform.c     |    9
-rw-r--r--  arch/xtensa/kernel/process.c      |  511
-rw-r--r--  arch/xtensa/kernel/ptrace.c       |  444
-rw-r--r--  arch/xtensa/kernel/semaphore.c    |  226
-rw-r--r--  arch/xtensa/kernel/setup.c        |  476
-rw-r--r--  arch/xtensa/kernel/signal.c       |  828
-rw-r--r--  arch/xtensa/kernel/smp.c          |  607
-rw-r--r--  arch/xtensa/kernel/stacktrace.c   |  120
-rw-r--r--  arch/xtensa/kernel/syscall.c      |   95
-rw-r--r--  arch/xtensa/kernel/syscalls.c     |  268
-rw-r--r--  arch/xtensa/kernel/syscalls.h     |  247
-rw-r--r--  arch/xtensa/kernel/time.c         |  261
-rw-r--r--  arch/xtensa/kernel/traps.c        |  254
-rw-r--r--  arch/xtensa/kernel/vectors.S      |  500
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S  |  296
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c |   63
29 files changed, 4665 insertions, 3931 deletions
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/xtensa/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d573017a5dd..18d962a8c0c 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -4,15 +4,36 @@
extra-y := head.o vmlinux.lds
-
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
- setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
- pci-dma.o
-
-## windowspill.o
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+ ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+ vectors.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o
+
+AFLAGS_head.o += -mtext-section-literals
+
+# In the Xtensa architecture, assembly generates literals that must always
+# precede the L32R instruction that references them, within a relative offset
+# of less than 256 kB. Therefore, the .text and .literal sections must be
+# combined in parentheses in the linker script, such as: *(.literal .text).
+#
+# We need to post-process the generated vmlinux.lds scripts to convert
+# *(xxx.text) to *(xxx.literal xxx.text) for the following text sections:
+# .text .ref.text .*init.text .*exit.text .text.*
+#
+# Replicate rules in scripts/Makefile.build
+
+sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
+ -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
+ -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
+quiet_cmd__cpp_lds_S = LDS $@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+ | sed $(sed-y) >$@
+$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
+ $(call if_changed_dep,_cpp_lds_S)
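For illustration, the sed rules above rewrite a generated linker-script rule
such as

    *(.init.text)

into

    *(.init.literal .init.text)

and a plain *(.text) into *(.literal .text), so that each literal pool is
placed immediately before the code that references it.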
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index a4956578a24..d4cef6039a5 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -16,14 +16,9 @@
*/
#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/thread_info.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
@@ -151,9 +146,9 @@
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -175,15 +170,14 @@ ENTRY(fast_unaligned)
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
- rsr a0, DEPC
- xsr a3, EXCSAVE_1
+ rsr a0, depc
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
/* Keep value of SAR in a0 */
- rsr a0, SAR
- rsr a8, EXCVADDR # load unaligned memory address
+ rsr a0, sar
+ rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
@@ -202,7 +196,7 @@ ENTRY(fast_unaligned)
/* Extract the instruction that caused the unaligned access. */
- rsr a7, EPC_1 # load exception address
+ rsr a7, epc1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
@@ -216,7 +210,7 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
_beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
addi a6, a5, -OP0_S32I_N
_beqz a6, .Lstore # S32I.N, do a store
@@ -251,7 +245,7 @@ ENTRY(fast_unaligned)
#endif
__src_b a3, a5, a6 # a3 has the data word
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
@@ -279,17 +273,17 @@ ENTRY(fast_unaligned)
1:
-#if XCHAL_HAVE_LOOP
- rsr a3, LEND # check if we reached LEND
- bne a7, a3, 1f
- rsr a3, LCOUNT # and LCOUNT != 0
- beqz a3, 1f
- addi a3, a3, -1 # decrement LCOUNT and set
- rsr a7, LBEG # set PC to LBEGIN
- wsr a3, LCOUNT
+#if XCHAL_HAVE_LOOPS
+ rsr a5, lend # check if we reached LEND
+ bne a7, a5, 1f
+ rsr a5, lcount # and LCOUNT != 0
+ beqz a5, 1f
+ addi a5, a5, -1 # decrement LCOUNT and set
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a5, lcount
#endif
-1: wsr a7, EPC_1 # skip load instruction
+1: wsr a7, epc1 # skip load instruction
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
@@ -336,7 +330,7 @@ ENTRY(fast_unaligned)
movi a6, 0 # mask: ffffffff:00000000
-#if XCHAL_HAVE_NARROW
+#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # incr. PC,assume 16-bit instruction
extui a5, a4, INSN_OP0, 4 # extract OP0
@@ -359,17 +353,17 @@ ENTRY(fast_unaligned)
/* Get memory address */
1:
-#if XCHAL_HAVE_LOOP
- rsr a3, LEND # check if we reached LEND
- bne a7, a3, 1f
- rsr a3, LCOUNT # and LCOUNT != 0
- beqz a3, 1f
- addi a3, a3, -1 # decrement LCOUNT and set
- rsr a7, LBEG # set PC to LBEGIN
- wsr a3, LCOUNT
+#if XCHAL_HAVE_LOOPS
+ rsr a4, lend # check if we reached LEND
+ bne a7, a4, 1f
+ rsr a4, lcount # and LCOUNT != 0
+ beqz a4, 1f
+ addi a4, a4, -1 # decrement LCOUNT and set
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a4, lcount
#endif
-1: wsr a7, EPC_1 # skip store instruction
+1: wsr a7, epc1 # skip store instruction
movi a4, ~3
and a4, a4, a8 # align memory address
@@ -411,11 +405,12 @@ ENTRY(fast_unaligned)
.Lexit:
movi a4, 0
- rsr a3, EXCSAVE_1
+ rsr a3, excsave1
s32i a4, a3, EXC_TABLE_FIXUP
/* Restore working register */
+ l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
@@ -424,7 +419,7 @@ ENTRY(fast_unaligned)
/* restore SAR and return */
- wsr a0, SAR
+ wsr a0, sar
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_AREG2
rfe
@@ -442,11 +437,11 @@ ENTRY(fast_unaligned)
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
- wsr a0, SAR
+ wsr a0, sar
mov a1, a2
- rsr a0, PS
- bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
+ rsr a0, ps
+ bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
@@ -454,6 +449,6 @@ ENTRY(fast_unaligned)
1: movi a0, _user_exception
jx a0
+ENDPROC(fast_unaligned)
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
-
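The fast_unaligned handler above emulates the access with two aligned loads
whose results are combined by a funnel shift (the __src_b above). A rough C
sketch of the little-endian load case, for illustration only -- the function
name is made up, and the real handler also decodes the instruction and
advances the PC:

        unsigned int load_unaligned32(const unsigned char *addr)
        {
                const unsigned int *p =
                        (const unsigned int *)((unsigned long)addr & ~3UL);
                unsigned int lo = p[0];         /* aligned word below */
                unsigned int hi = p[1];         /* aligned word above */
                unsigned int shift = ((unsigned long)addr & 3) * 8;

                return shift ? (lo >> shift) | (hi << (32 - shift)) : lo;
        }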
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7cd1d7f8f60..1915c7c889b 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -13,18 +13,18 @@
*/
#include <asm/processor.h>
+#include <asm/coprocessor.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+
#include <asm/ptrace.h>
-#include <asm/processor.h>
#include <asm/uaccess.h>
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-#define BLANK() asm volatile("\n->" : : )
-
int main(void)
{
/* struct pt_regs */
@@ -39,7 +39,10 @@ int main(void)
DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+ DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+ DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
@@ -62,7 +65,8 @@ int main(void)
DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
- BLANK();
+ DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+ DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
/* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@@ -70,24 +74,40 @@ int main(void)
DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
DEFINE(TASK_PID, offsetof (struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
- DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
+ DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
- BLANK();
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
- DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
- DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
- BLANK();
+ DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+#endif
+ DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+ DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
+ DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+ thread.current_ds));
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
- BLANK();
- DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
- return 0;
-}
+ /* struct page */
+ DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
+
+ /* constants */
+ DEFINE(_CLONE_VM, CLONE_VM);
+ DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+ DEFINE(PG_ARCH_1, PG_arch_1);
+ return 0;
+}
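The hand-rolled DEFINE()/BLANK() macros are dropped in favor of the common
<linux/kbuild.h> versions; the removed lines above show the same idiom. A
minimal standalone sketch of the technique, with struct and symbol names
invented for illustration: the "i" constraint makes the compiler evaluate
offsetof() at compile time and embed the result in the generated assembly,
where a Kbuild sed script turns the "->SYM value" marker lines into a header
of #define lines usable from assembly sources.

        #include <stddef.h>

        struct pt_regs_demo {                   /* hypothetical layout */
                unsigned long pc;
                unsigned long ps;
                unsigned long sar;
        };

        #define DEFINE(sym, val) \
                asm volatile("\n->" #sym " %0 " #val : : "i" (val))

        int main(void)
        {
                DEFINE(PT_SAR_DEMO, offsetof(struct pt_regs_demo, sar));
                return 0;
        }

Compiling with "gcc -S" emits a line like "->PT_SAR_DEMO $8 offsetof(struct
pt_regs_demo, sar)" on a 32-bit target (the constant syntax is
target-dependent), which the build machinery rewrites into
"#define PT_SAR_DEMO 8".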
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index cf5a93fb6a2..a482df5df2b 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -8,193 +8,348 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
*
- * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
+/* IO protection is currently unsupported. */
+
+ENTRY(fast_io_protect)
+
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+ENDPROC(fast_io_protect)
+
+#if XTENSA_HAVE_COPROCESSORS
+
/*
- * This module contains a table that describes the layout of the various
- * custom registers and states associated with each coprocessor, as well
- * as those not associated with any coprocessor ("extra state").
- * This table is included with core dumps and is available via the ptrace
- * interface, allowing the layout of such register/state information to
- * be modified in the kernel without affecting the debugger. Each
- * register or state is identified using a 32-bit "libdb target number"
- * assigned when the Xtensa processor is generated.
+ * Macros for lazy context switch.
*/
-#include <linux/linkage.h>
-#include <asm/processor.h>
+#define SAVE_CP_REGS(x) \
+ .align 4; \
+ .Lsave_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_store a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
-#if XCHAL_HAVE_CP
+#define SAVE_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
-#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
-ENTRY(release_coprocessors)
+#define LOAD_CP_REGS(x) \
+ .align 4; \
+ .Lload_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_load a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
- entry a1, 16
- # a2: task
- movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
- movi a4, coprocessor_info+CP_LAST # a4: owner-table
- # a5: tmp
- movi a6, 0 # a6: 0
- rsil a7, LOCKLEVEL # a7: PS
+#define LOAD_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
-1: /* Check if task is coprocessor owner of coprocessor[i]. */
+ SAVE_CP_REGS(0)
+ SAVE_CP_REGS(1)
+ SAVE_CP_REGS(2)
+ SAVE_CP_REGS(3)
+ SAVE_CP_REGS(4)
+ SAVE_CP_REGS(5)
+ SAVE_CP_REGS(6)
+ SAVE_CP_REGS(7)
- l32i a5, a4, COPROCESSOR_INFO_OWNER
- srli a3, a3, 1
- beqz a3, 1f
- addi a4, a4, -8
- beq a2, a5, 1b
+ LOAD_CP_REGS(0)
+ LOAD_CP_REGS(1)
+ LOAD_CP_REGS(2)
+ LOAD_CP_REGS(3)
+ LOAD_CP_REGS(4)
+ LOAD_CP_REGS(5)
+ LOAD_CP_REGS(6)
+ LOAD_CP_REGS(7)
- /* Found an entry: Clear entry CPENABLE bit to disable CP. */
+ .align 4
+.Lsave_cp_regs_jump_table:
+ SAVE_CP_REGS_TAB(0)
+ SAVE_CP_REGS_TAB(1)
+ SAVE_CP_REGS_TAB(2)
+ SAVE_CP_REGS_TAB(3)
+ SAVE_CP_REGS_TAB(4)
+ SAVE_CP_REGS_TAB(5)
+ SAVE_CP_REGS_TAB(6)
+ SAVE_CP_REGS_TAB(7)
- rsr a5, CPENABLE
- s32i a6, a4, COPROCESSOR_INFO_OWNER
- xor a5, a3, a5
- wsr a5, CPENABLE
+.Lload_cp_regs_jump_table:
+ LOAD_CP_REGS_TAB(0)
+ LOAD_CP_REGS_TAB(1)
+ LOAD_CP_REGS_TAB(2)
+ LOAD_CP_REGS_TAB(3)
+ LOAD_CP_REGS_TAB(4)
+ LOAD_CP_REGS_TAB(5)
+ LOAD_CP_REGS_TAB(6)
+ LOAD_CP_REGS_TAB(7)
- bnez a3, 1b
+/*
+ * coprocessor_save(buffer, index)
+ * a2 a3
+ * coprocessor_load(buffer, index)
+ * a2 a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
-1: wsr a7, PS
- rsync
+ENTRY(coprocessor_save)
+
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_save)
-ENTRY(disable_coprocessor)
- entry sp, 16
- rsil a7, LOCKLEVEL
- rsr a3, CPENABLE
- movi a4, 1
- ssl a2
- sll a4, a4
- and a4, a3, a4
- xor a3, a3, a4
- wsr a3, CPENABLE
- wsr a7, PS
- rsync
- retw
+ENTRY(coprocessor_load)
-ENTRY(enable_coprocessor)
- entry sp, 16
- rsil a7, LOCKLEVEL
- rsr a3, CPENABLE
- movi a4, 1
- ssl a2
- sll a4, a4
- or a3, a3, a4
- wsr a3, CPENABLE
- wsr a7, PS
- rsync
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+ addx4 a3, a3, a0
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
-#endif
+ENDPROC(coprocessor_load)
-ENTRY(save_coprocessor_extra)
- entry sp, 16
- xchal_extra_store_funcbody
- retw
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ * a2 a3
+ * coprocessor_restore(struct thread_info*, index)
+ * a2 a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the thread_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
-ENTRY(restore_coprocessor_extra)
- entry sp, 16
- xchal_extra_load_funcbody
- retw
-ENTRY(save_coprocessor_registers)
- entry sp, 16
- xchal_cpi_store_funcbody
+ENTRY(coprocessor_flush)
+
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
-ENTRY(restore_coprocessor_registers)
- entry sp, 16
- xchal_cpi_load_funcbody
+ENDPROC(coprocessor_flush)
+
+ENTRY(coprocessor_restore)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+ addx4 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_restore)
/*
- * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
- * describe the contents of coprocessor & extra save areas in terms of
- * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
- * latter macros here; they expand into a table of the format we want.
- * The general format is:
+ * Entry condition:
*
- * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
- * bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
- * bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
- * numentries, contentsize, regname_base,
- * regfile_name, rsv2, rsv3)
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
*
- * For this table, we only care about the <libdbnum>, <offset> and <size>
- * fields.
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
-
-#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
- bitmask, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
- bitmask, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
- numentries, contentsize, regname_base, \
- regfile_name, rsv2, rsv3) \
- reg_entry libdbnum, offset, size ;
-
-/* A single table entry: */
- .macro reg_entry libdbnum, offset, size
- .ifne (__last_offset-(__last_group_offset+\offset))
- /* padding entry */
- .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
- .endif
- .word \libdbnum /* actual entry */
- .set __last_offset, __last_group_offset+\offset+\size
- .endm /* reg_entry */
-
-
-/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
- .macro reg_group cpnum, num_entries, align
- .set __last_group_offset, (__last_offset + \align- 1) & -\align
- .ifne \num_entries
- .word 0xFD000000+(\cpnum<<16)+\num_entries
- .endif
- .endm /* reg_group */
+ENTRY(fast_coprocessor_double)
-/*
- * Register info tables.
- */
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+ENDPROC(fast_coprocessor_double)
+
+ENTRY(fast_coprocessor)
+
+ /* Save remaining registers a1-a3 and SAR */
+
+ s32i a3, a2, PT_AREG3
+ rsr a3, sar
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_SAR
+ mov a1, a2
+ rsr a2, depc
+ s32i a2, a1, PT_AREG2
+
+ /*
+ * The hal macros require up to 4 temporary registers. We use a3..a6.
+ */
+
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+
+ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+ rsr a3, exccause
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+ /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+ ssl a3 # SAR: 32 - coprocessor_number
+ movi a2, 1
+ rsr a0, cpenable
+ sll a2, a2
+ or a0, a0, a2
+ wsr a0, cpenable
+ rsync
+
+ /* Retrieve previous owner. (a3 still holds CP number) */
+
+ movi a0, coprocessor_owner # list of owners
+ addx4 a0, a3, a0 # entry for CP
+ l32i a4, a0, 0
+
+ beqz a4, 1f # skip 'save' if no previous owner
+
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+ l32i a5, a4, THREAD_CPENABLE
+ xor a5, a5, a2 # (1 << cp-id) still in a2
+ s32i a5, a4, THREAD_CPENABLE
+
+ /*
+ * Get context save area and 'call' save routine.
+ * (a4 still holds previous owner (thread_info), a3 CP number)
+ */
+
+ movi a5, .Lsave_cp_regs_jump_table
+ movi a0, 2f # a0: 'return' address
+ addx8 a3, a3, a5 # a3: coprocessor number
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5 # a4: address of save routine
+ jx a4
+
+ /* Note that only a0 and a1 were preserved. */
+
+2: rsr a3, exccause
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+ movi a0, coprocessor_owner
+ addx4 a0, a3, a0
+
+ /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1: GET_THREAD_INFO (a4, a1)
+ s32i a4, a0, 0
+
+ /* Get context save area and 'call' load routine. */
+
+ movi a5, .Lload_cp_regs_jump_table
+ movi a0, 1f
+ addx8 a3, a3, a5
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5
+ jx a4
+
+ /* Restore all registers and return from exception handler. */
+
+1: l32i a6, a1, PT_AREG6
+ l32i a5, a1, PT_AREG5
+ l32i a4, a1, PT_AREG4
+
+ l32i a0, a1, PT_SAR
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ wsr a0, sar
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+
+ rfe
+
+ENDPROC(fast_coprocessor)
+
+ .data
+
+ENTRY(coprocessor_owner)
+
+ .fill XCHAL_CP_MAX, 4, 0
- .section .rodata, "a"
- .globl _xtensa_reginfo_tables
- .globl _xtensa_reginfo_table_size
- .align 4
-_xtensa_reginfo_table_size:
- .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
-
-_xtensa_reginfo_tables:
- .set __last_offset, 0
- reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
- XCHAL_EXTRA_SA_CONTENTS_LIBDB
- reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
- XCHAL_CP0_SA_CONTENTS_LIBDB
- reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
- XCHAL_CP1_SA_CONTENTS_LIBDB
- reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
- XCHAL_CP2_SA_CONTENTS_LIBDB
- reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
- XCHAL_CP3_SA_CONTENTS_LIBDB
- reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
- XCHAL_CP4_SA_CONTENTS_LIBDB
- reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
- XCHAL_CP5_SA_CONTENTS_LIBDB
- reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
- XCHAL_CP6_SA_CONTENTS_LIBDB
- reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
- XCHAL_CP7_SA_CONTENTS_LIBDB
- .word 0xFC000000 /* invalid register number,marks end of table*/
-_xtensa_reginfo_table_end:
+END(coprocessor_owner)
+#endif /* XTENSA_HAVE_COPROCESSORS */
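What fast_coprocessor implements above is a lazy context switch: coprocessor
state is only saved and restored when a thread actually touches a disabled
coprocessor. A compilable C sketch of the control flow -- all helper names
and the handler signature are stand-ins, not kernel API; the real save/load
go through the xchal_cp##x jump tables above:

        struct ti_sketch {
                unsigned long cpenable;         /* per-thread CPENABLE shadow */
                /* ... per-coprocessor register save areas ... */
        };

        #define XCHAL_CP_MAX 8                  /* assumed for illustration */

        static struct ti_sketch *cp_owner_sketch[XCHAL_CP_MAX];

        extern void cp_save(struct ti_sketch *ti, int cp);  /* hypothetical */
        extern void cp_load(struct ti_sketch *ti, int cp);  /* hypothetical */
        extern void hw_enable_cp(int cp);                   /* hypothetical */
        extern struct ti_sketch *cur_thread(void);          /* hypothetical */

        /* Entered on an EXCCAUSE_COPROCESSORn_DISABLED exception. */
        void handle_cp_disabled(int cp)
        {
                struct ti_sketch *prev = cp_owner_sketch[cp];
                struct ti_sketch *next = cur_thread();

                hw_enable_cp(cp);                       /* set CPENABLE bit */
                if (prev) {
                        prev->cpenable &= ~(1UL << cp); /* drop old ownership */
                        cp_save(prev, cp);              /* spill its CP state */
                }
                cp_owner_sketch[cp] = next;             /* record new owner */
                cp_load(next, cp);                      /* reload our CP state */
        }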
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 89e409e9e0d..ef7f4990722 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2005 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
+#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -24,14 +25,12 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
-#include <xtensa/coreasm.h>
+#include <asm/tlbflush.h>
+#include <variant/tie-asm.h>
/* Unimplemented features. */
-#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
#undef KERNEL_STACK_OVERFLOW_CHECK
-#undef PREEMPTIBLE_KERNEL
-#undef ALLOCA_EXCEPTION_IN_IRAM
/* Not well tested.
*
@@ -91,9 +90,9 @@
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original value in depc
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave1: a3
+ * excsave1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -104,15 +103,14 @@
* excsave has been restored, and
* stack pointer (a1) has been set.
*
- * Note: _user_exception might be at an odd adress. Don't use call0..call12
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
*/
ENTRY(user_exception)
- /* Save a2, a3, and depc, restore excsave_1 and set SP. */
+ /* Save a1, a2, a3, and set SP. */
- xsr a3, EXCSAVE_1
- rsr a0, DEPC
+ rsr a0, depc
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
@@ -124,15 +122,21 @@ _user_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
- rsr a3, SAR
- wsr a2, ICOUNTLEVEL
+ rsr a3, sar
+ xsr a2, icountlevel
s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
+
+#if XCHAL_HAVE_THREADPTR
+ rur a2, threadptr
+ s32i a2, a1, PT_THREADPTR
+#endif
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- rsr a2, WINDOWBASE
- rsr a3, WINDOWSTART
+ rsr a2, windowbase
+ rsr a3, windowstart
ssr a2
s32i a2, a1, PT_WINDOWBASE
s32i a3, a1, PT_WINDOWSTART
@@ -168,7 +172,7 @@ _user_exception:
* We have to save all registers up to the first '1' from
* the right, except the current frame (bit 0).
* Assume a2 is: 001001000110001
- * All regiser frames starting from the top fiel to the marked '1'
+ * All register frames starting from the top field to the marked '1'
* must be saved.
*/
@@ -203,32 +207,21 @@ _user_exception:
/* WINDOWBASE still in SAR! */
- rsr a2, SAR # original WINDOWBASE
+ rsr a2, sar # original WINDOWBASE
movi a3, 1
ssl a2
sll a3, a3
- wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
- wsr a2, WINDOWBASE # and WINDOWSTART
+ wsr a3, windowstart # set corresponding WINDOWSTART bit
+ wsr a2, windowbase # and WINDOWSTART
rsync
/* We are back to the original stack pointer (a1) */
-2:
-#if XCHAL_EXTRA_SA_SIZE
-
- /* For user exceptions, save the extra state into the user's TCB.
- * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
- */
-
- GET_CURRENT(a2,a1)
- addi a2, a2, THREAD_CP_SAVE
- xchal_extra_store_funcbody
-#endif
-
- /* Now, jump to the common exception handler. */
+2: /* Now, jump to the common exception handler. */
j common_exception
+ENDPROC(user_exception)
/*
* First-level exit handler for kernel exceptions
@@ -242,9 +235,9 @@ _user_exception:
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -255,15 +248,14 @@ _user_exception:
* excsave has been restored, and
* stack pointer (a1) has been set.
*
- * Note: _kernel_exception might be at an odd adress. Don't use call0..call12
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
*/
ENTRY(kernel_exception)
- /* Save a0, a2, a3, DEPC and set SP. */
+ /* Save a1, a2, a3, and set SP. */
- xsr a3, EXCSAVE_1 # restore a3, excsave_1
- rsr a0, DEPC # get a2
+ rsr a0, depc # get a2
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
@@ -275,15 +267,16 @@ _kernel_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
- rsr a3, SAR
- wsr a2, ICOUNTLEVEL
+ rsr a3, sar
+ xsr a2, icountlevel
s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- rsr a2, WINDOWBASE # don't need to save these, we only
- rsr a3, WINDOWSTART # need shifted windowstart: windowmask
+ rsr a2, windowbase # don't need to save these, we only
+ rsr a3, windowstart # need shifted windowstart: windowmask
ssr a2
slli a2, a3, 32-WSBITS
src a2, a3, a2
@@ -330,24 +323,26 @@ _kernel_exception:
common_exception:
- /* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */
+ /* Save some registers, disable loops and clear the syscall flag. */
- rsr a2, DEBUGCAUSE
- rsr a3, EPC_1
+ rsr a2, debugcause
+ rsr a3, epc1
s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC
- rsr a3, EXCVADDR
+ movi a2, -1
+ rsr a3, excvaddr
+ s32i a2, a1, PT_SYSCALL
movi a2, 0
s32i a3, a1, PT_EXCVADDR
- xsr a2, LCOUNT
+ xsr a2, lcount
s32i a2, a1, PT_LCOUNT
/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
- rsr a0, EXCCAUSE
+ rsr a0, exccause
movi a3, 0
- rsr a2, EXCSAVE_1
+ rsr a2, excsave1
s32i a0, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP
@@ -355,34 +350,62 @@ common_exception:
* so we can allow exceptions and interrupts (*) again.
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
*
- * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
- * (interrupts disabled) and if this exception is not an interrupt.
+ * (*) We only allow interrupts if they were previously enabled and
+ * we're not handling an IRQ
*/
- rsr a3, PS
- addi a0, a0, -4
- movi a2, 1
- extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
- moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
- movi a2, PS_WOE_MASK
+ rsr a3, ps
+ addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a2, LOCKLEVEL
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ # a3 = PS.INTLEVEL
+ moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
+ movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
- rsr a0, EXCCAUSE
- xsr a3, PS
+ rsr a0, exccause
+ xsr a3, ps
s32i a3, a1, PT_PS # save ps
- /* Save LBEG, LEND */
+ /* Save lbeg, lend */
- rsr a2, LBEG
- rsr a3, LEND
+ rsr a2, lbeg
+ rsr a3, lend
s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND
+ /* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ rsr a2, scompare1
+ s32i a2, a1, PT_SCOMPARE1
+#endif
+
+ /* Save optional registers. */
+
+ save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* Coming here with an interrupt means interrupts were enabled
+	 * and we've just disabled them.
+	 */
+ movi a4, trace_hardirqs_off
+ callx4 a4
+1:
+#endif
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
- movi a4, exc_table
+ rsr a4, excsave1
mov a6, a1 # pass stack frame
mov a7, a0 # pass EXCCAUSE
addx4 a4, a0, a4
@@ -393,110 +416,113 @@ common_exception:
callx4 a4
/* Jump here for exception exit */
-
+ .global common_exception_return
common_exception_return:
+1:
+ rsil a2, LOCKLEVEL
+
/* Jump if we are returning from kernel exceptions. */
-1: l32i a3, a1, PT_PS
- _bbsi.l a3, PS_UM_SHIFT, 2f
- j kernel_exception_exit
+ l32i a3, a1, PT_PS
+ GET_THREAD_INFO(a2, a1)
+ l32i a4, a2, TI_FLAGS
+ _bbci.l a3, PS_UM_BIT, 6f
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
* and have to restore WB and WS, extra states, and all registers
* in the register file that were in use in the user task.
+ * Note that we don't disable interrupts here.
*/
-2: wsr a3, PS /* disable interrupts */
-
- /* Check for signals (keep interrupts disabled while we read TI_FLAGS)
- * Note: PS.INTLEVEL = 0, PS.EXCM = 1
- */
-
- GET_THREAD_INFO(a2,a1)
- l32i a4, a2, TI_FLAGS
-
- /* Enable interrupts again.
- * Note: When we get here, we certainly have handled any interrupts.
- * (Hint: There is only one user exception frame on stack)
- */
-
- movi a3, PS_WOE_MASK
-
_bbsi.l a4, TIF_NEED_RESCHED, 3f
- _bbci.l a4, TIF_SIGPENDING, 4f
+ _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
+ _bbci.l a4, TIF_SIGPENDING, 5f
-#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
- l32i a4, a1, PT_DEPC
+2: l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
-#endif
- /* Reenable interrupts and call do_signal() */
+ /* Call do_signal() */
- wsr a3, PS
- movi a4, do_signal # int do_signal(struct pt_regs*, sigset_t*)
+ rsil a2, 0
+ movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1
- movi a7, 0
callx4 a4
j 1b
-3: /* Reenable interrupts and reschedule */
+3: /* Reschedule */
- wsr a3, PS
+ rsil a2, 0
movi a4, schedule # void schedule (void)
callx4 a4
j 1b
- /* Restore the state of the task and return from the exception. */
-
-
- /* If we are returning from a user exception, and the process
- * to run next has PT_SINGLESTEP set, we want to setup
- * ICOUNT and ICOUNTLEVEL to step one instruction.
- * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
- */
-
-4: /* a2 holds GET_CURRENT(a2,a1) */
+#ifdef CONFIG_PREEMPT
+6:
+ _bbci.l a4, TIF_NEED_RESCHED, 4f
- l32i a3, a2, TI_TASK
- l32i a3, a3, TASK_PTRACE
- bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set
+ /* Check current_thread_info->preempt_count */
- movi a3, -2 # PT_SINGLESTEP flag is set,
- movi a4, 1 # icountlevel of 1 means it won't
- wsr a3, ICOUNT # start counting until after rfe
- wsr a4, ICOUNTLEVEL # so setup icount & icountlevel.
- isync
+ l32i a4, a2, TI_PRE_COUNT
+ bnez a4, 4f
+ movi a4, preempt_schedule_irq
+ callx4 a4
+ j 1b
+#endif
+5:
+#ifdef CONFIG_DEBUG_TLB_SANITY
+ l32i a4, a1, PT_DEPC
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+ movi a4, check_tlb_sanity
+ callx4 a4
+#endif
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* Coming here with an interrupt means interrupts were enabled
+	 * and we'll reenable them on return.
+	 */
+ movi a4, trace_hardirqs_on
+ callx4 a4
1:
+#endif
+ /* Restore optional registers. */
-#if XCHAL_EXTRA_SA_SIZE
+ load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
- /* For user exceptions, restore the extra state from the user's TCB. */
+ /* Restore SCOMPARE1 */
- /* Note: a2 still contains GET_CURRENT(a2,a1) */
- addi a2, a2, THREAD_CP_SAVE
- xchal_extra_load_funcbody
+#if XCHAL_HAVE_S32C1I
+ l32i a2, a1, PT_SCOMPARE1
+ wsr a2, scompare1
+#endif
+ wsr a3, ps /* disable interrupts */
- /* We must assume that xchal_extra_store_funcbody destroys
- * registers a2..a15. FIXME, this list can eventually be
- * reduced once real register requirements of the macro are
- * finalized. */
+ _bbci.l a3, PS_UM_BIT, kernel_exception_exit
-#endif /* XCHAL_EXTRA_SA_SIZE */
+user_exception_exit:
+ /* Restore the state of the task and return from the exception. */
/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
l32i a2, a1, PT_WINDOWBASE
l32i a3, a1, PT_WINDOWSTART
- wsr a1, DEPC # use DEPC as temp storage
- wsr a3, WINDOWSTART # restore WINDOWSTART
+ wsr a1, depc # use DEPC as temp storage
+ wsr a3, windowstart # restore WINDOWSTART
ssr a2 # preserve user's WB in the SAR
- wsr a2, WINDOWBASE # switch to user's saved WB
+ wsr a2, windowbase # switch to user's saved WB
rsync
- rsr a1, DEPC # restore stack pointer
+ rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7
_bltui a6, 16, 1f # only have to restore current window?
@@ -522,8 +548,8 @@ common_exception_return:
/* Clear unrestored registers (don't leak anything to user-land */
-1: rsr a0, WINDOWBASE
- rsr a3, SAR
+1: rsr a0, windowbase
+ rsr a3, sar
sub a3, a0, a3
beqz a3, 2f
extui a3, a3, 0, WBBITS
@@ -542,6 +568,11 @@ common_exception_return:
* (if we have restored WSBITS-1 frames).
*/
+#if XCHAL_HAVE_THREADPTR
+ l32i a3, a1, PT_THREADPTR
+ wur a3, threadptr
+#endif
+
2: j common_exception_exit
/* This is the kernel exception exit.
@@ -551,33 +582,6 @@ common_exception_return:
kernel_exception_exit:
- /* Disable interrupts (a3 holds PT_PS) */
-
- wsr a3, PS
-
-#ifdef PREEMPTIBLE_KERNEL
-
-#ifdef CONFIG_PREEMPT
-
- /*
- * Note: We've just returned from a call4, so we have
- * at least 4 addt'l regs.
- */
-
- /* Check current_thread_info->preempt_count */
-
- GET_THREAD_INFO(a2)
- l32i a3, a2, TI_PREEMPT
- bnez a3, 1f
-
- l32i a2, a2, TI_FLAGS
-
-1:
-
-#endif
-
-#endif
-
/* Check if we have to do a movsp.
*
* We only have to do a movsp if the previous window-frame has
@@ -607,7 +611,7 @@ kernel_exception_exit:
/* Test WINDOWSTART now. If spilled, do the movsp */
- rsr a3, WINDOWSTART
+ rsr a3, windowstart
addi a0, a3, -1
and a3, a3, a0
_bnez a3, common_exception_exit
@@ -633,6 +637,8 @@ kernel_exception_exit:
common_exception_exit:
+ /* Restore address registers. */
+
_bbsi.l a2, 1, 1f
l32i a4, a1, PT_AREG4
l32i a5, a1, PT_AREG5
@@ -653,17 +659,24 @@ common_exception_exit:
1: l32i a2, a1, PT_PC
l32i a3, a1, PT_SAR
- wsr a2, EPC_1
- wsr a3, SAR
+ wsr a2, epc1
+ wsr a3, sar
/* Restore LBEG, LEND, LCOUNT */
l32i a2, a1, PT_LBEG
l32i a3, a1, PT_LEND
- wsr a2, LBEG
+ wsr a2, lbeg
l32i a2, a1, PT_LCOUNT
- wsr a3, LEND
- wsr a2, LCOUNT
+ wsr a3, lend
+ wsr a2, lcount
+
+ /* We control single stepping through the ICOUNTLEVEL register. */
+
+ l32i a2, a1, PT_ICOUNTLEVEL
+ movi a3, -2
+ wsr a2, icountlevel
+ wsr a3, icount
/* Check if it was double exception. */
@@ -678,11 +691,13 @@ common_exception_exit:
l32i a1, a1, PT_AREG1
rfe
-1: wsr a0, DEPC
+1: wsr a0, depc
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfde
+ENDPROC(kernel_exception)
+
/*
* Debug exception handler.
*
@@ -693,48 +708,48 @@ common_exception_exit:
ENTRY(debug_exception)
- rsr a0, EPS + XCHAL_DEBUGLEVEL
- bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode
+ rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
+ bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
- /* Set EPC_1 and EXCCAUSE */
+ /* Set EPC1 and EXCCAUSE */
- wsr a2, DEPC # save a2 temporarily
- rsr a2, EPC + XCHAL_DEBUGLEVEL
- wsr a2, EPC_1
+ wsr a2, depc # save a2 temporarily
+ rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
+ wsr a2, epc1
movi a2, EXCCAUSE_MAPPED_DEBUG
- wsr a2, EXCCAUSE
+ wsr a2, exccause
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
- movi a2, 1 << PS_EXCM_SHIFT
+ movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector
- wsr a2, PS
- xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+ wsr a2, ps
+ xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Switch to kernel/user stack, restore jump vector, and save a0 */
- bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode
+ bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
addi a2, a1, -16-PT_SIZE # assume kernel stack
s32i a0, a2, PT_AREG0
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC # mark it as a regular exception
- xsr a0, DEPC
+ xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
j _kernel_exception
-2: rsr a2, EXCSAVE_1
+2: rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
s32i a0, a2, PT_AREG0
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC
- xsr a0, DEPC
+ xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
@@ -743,6 +758,7 @@ ENTRY(debug_exception)
/* Debug exception while in exception mode. */
1: j 1b // FIXME!!
+ENDPROC(debug_exception)
/*
* We get here in case of an unrecoverable exception.
@@ -774,12 +790,12 @@ ENTRY(unrecoverable_exception)
movi a0, 1
movi a1, 0
- wsr a0, WINDOWSTART
- wsr a1, WINDOWBASE
+ wsr a0, windowstart
+ wsr a1, windowbase
rsync
- movi a1, PS_WOE_MASK | 1
- wsr a1, PS
+ movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a1, ps
rsync
movi a1, init_task
@@ -793,6 +809,7 @@ ENTRY(unrecoverable_exception)
1: j 1b
+ENDPROC(unrecoverable_exception)
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
@@ -801,176 +818,64 @@ ENTRY(unrecoverable_exception)
*
* The ALLOCA handler is entered when user code executes the MOVSP
* instruction and the caller's frame is not in the register file.
- * In this case, the caller frame's a0..a3 are on the stack just
- * below sp (a1), and this handler moves them.
*
- * For "MOVSP <ar>,<as>" without destination register a1, this routine
- * simply moves the value from <as> to <ar> without moving the save area.
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow
+ * exception of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back, and then sets the old window base (OWB) in
+ * the 'ps' register to the rolled-back window base. The 'movsp' instruction
+ * will be re-executed, and this time, since the next window frame is in the
+ * active AR registers, it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss, the page will get mapped
+ * and the partial WindowUnderflow will be handled in the double exception
+ * handler.
*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-#if XCHAL_HAVE_BE
-#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
-#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
-#else
-#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
-#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
-#endif
-
ENTRY(fast_alloca)
+ rsr a0, windowbase
+ rotw -1
+ rsr a2, ps
+ extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+ xor a3, a3, a4
+ l32i a4, a6, PT_AREG0
+ l32i a1, a6, PT_DEPC
+ rsr a6, depc
+ wsr a1, depc
+ slli a3, a3, PS_OWB_SHIFT
+ xor a2, a2, a3
+ wsr a2, ps
+ rsync
- /* We shouldn't be in a double exception. */
-
- l32i a0, a2, PT_DEPC
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
-
- rsr a0, DEPC # get a2
- s32i a4, a2, PT_AREG4 # save a4 and
- s32i a0, a2, PT_AREG2 # a2 to stack
-
- /* Exit critical section. */
-
- movi a0, 0
- s32i a0, a3, EXC_TABLE_FIXUP
-
- /* Restore a3, excsave_1 */
-
- xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl.
- rsr a4, EPC_1 # get exception address
- s32i a3, a2, PT_AREG3 # save a3 to stack
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
- /* Note: l8ui not allowed in IRAM/IROM!! */
- l8ui a0, a4, 1 # read as(src) from MOVSP instruction
-#endif
- movi a3, .Lmovsp_src
- _EXTUI_MOVSP_SRC(a0) # extract source register number
- addx8 a3, a0, a3
- jx a3
-
-.Lunhandled_double:
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
-
- .align 8
-.Lmovsp_src:
- l32i a3, a2, PT_AREG0; _j 1f; .align 8
- mov a3, a1; _j 1f; .align 8
- l32i a3, a2, PT_AREG2; _j 1f; .align 8
- l32i a3, a2, PT_AREG3; _j 1f; .align 8
- l32i a3, a2, PT_AREG4; _j 1f; .align 8
- mov a3, a5; _j 1f; .align 8
- mov a3, a6; _j 1f; .align 8
- mov a3, a7; _j 1f; .align 8
- mov a3, a8; _j 1f; .align 8
- mov a3, a9; _j 1f; .align 8
- mov a3, a10; _j 1f; .align 8
- mov a3, a11; _j 1f; .align 8
- mov a3, a12; _j 1f; .align 8
- mov a3, a13; _j 1f; .align 8
- mov a3, a14; _j 1f; .align 8
- mov a3, a15; _j 1f; .align 8
-
-1:
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
- l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
-#endif
- addi a4, a4, 3 # step over movsp
- _EXTUI_MOVSP_DST(a0) # extract destination register
- wsr a4, EPC_1 # save new epc_1
-
- _bnei a0, 1, 1f # no 'movsp a1, ax': jump
-
- /* Move the save area. This implies the use of the L32E
- * and S32E instructions, because this move must be done with
- * the user's PS.RING privilege levels, not with ring 0
- * (kernel's) privileges currently active with PS.EXCM
- * set. Note that we have stil registered a fixup routine with the
- * double exception vector in case a double exception occurs.
- */
-
- /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
-
- l32e a0, a1, -16
- l32e a4, a1, -12
- s32e a0, a3, -16
- s32e a4, a3, -12
- l32e a0, a1, -8
- l32e a4, a1, -4
- s32e a0, a3, -8
- s32e a4, a3, -4
-
- /* Restore stack-pointer and all the other saved registers. */
-
- mov a1, a3
-
- l32i a4, a2, PT_AREG4
- l32i a3, a2, PT_AREG3
- l32i a0, a2, PT_AREG0
- l32i a2, a2, PT_AREG2
- rfe
-
- /* MOVSP <at>,<as> was invoked with <at> != a1.
- * Because the stack pointer is not being modified,
- * we should be able to just modify the pointer
- * without moving any save area.
- * The processor only traps these occurrences if the
- * caller window isn't live, so unfortunately we can't
- * use this as an alternate trap mechanism.
- * So we just do the move. This requires that we
- * resolve the destination register, not just the source,
- * so there's some extra work.
- * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
- */
-
- /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
-
-1: movi a4, .Lmovsp_dst
- addx8 a4, a0, a4
- jx a4
-
- .align 8
-.Lmovsp_dst:
- s32i a3, a2, PT_AREG0; _j 1f; .align 8
- mov a1, a3; _j 1f; .align 8
- s32i a3, a2, PT_AREG2; _j 1f; .align 8
- s32i a3, a2, PT_AREG3; _j 1f; .align 8
- s32i a3, a2, PT_AREG4; _j 1f; .align 8
- mov a5, a3; _j 1f; .align 8
- mov a6, a3; _j 1f; .align 8
- mov a7, a3; _j 1f; .align 8
- mov a8, a3; _j 1f; .align 8
- mov a9, a3; _j 1f; .align 8
- mov a10, a3; _j 1f; .align 8
- mov a11, a3; _j 1f; .align 8
- mov a12, a3; _j 1f; .align 8
- mov a13, a3; _j 1f; .align 8
- mov a14, a3; _j 1f; .align 8
- mov a15, a3; _j 1f; .align 8
-
-1: l32i a4, a2, PT_AREG4
- l32i a3, a2, PT_AREG3
- l32i a0, a2, PT_AREG0
- l32i a2, a2, PT_AREG2
- rfe
-
+ _bbci.l a4, 31, 4f
+ rotw -1
+ _bbci.l a8, 30, 8f
+ rotw -1
+ j _WindowUnderflow12
+8: j _WindowUnderflow8
+4: j _WindowUnderflow4
+ENDPROC(fast_alloca)
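The extui/xor/slli/xor sequence in fast_alloca above patches the OWB field of
PS with the rolled-back window base without touching the other PS bits. The
same bit trick in C, with the field position passed in as assumed parameters:

        unsigned set_owb(unsigned ps, unsigned wb,
                         unsigned shift, unsigned width)
        {
                unsigned owb = (ps >> shift) & ((1u << width) - 1);

                /* x ^ ((field ^ new) << shift) replaces the field with new */
                return ps ^ ((owb ^ wb) << shift);
        }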
/*
* fast system calls.
@@ -986,81 +891,80 @@ ENTRY(fast_alloca)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*/
ENTRY(fast_syscall_kernel)
/* Skip syscall. */
- rsr a0, EPC_1
+ rsr a0, epc1
addi a0, a0, 3
- wsr a0, EPC_1
+ wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
- rsr a0, DEPC # get syscall-nr
+ rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
-
- addi a0, a0, -__NR_sysxtensa
- _beqz a0, fast_syscall_sysxtensa
+ _beqi a0, __NR_xtensa, fast_syscall_xtensa
j kernel_exception
+ENDPROC(fast_syscall_kernel)
ENTRY(fast_syscall_user)
/* Skip syscall. */
- rsr a0, EPC_1
+ rsr a0, epc1
addi a0, a0, 3
- wsr a0, EPC_1
+ wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
- rsr a0, DEPC # get syscall-nr
+ rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
-
- addi a0, a0, -__NR_sysxtensa
- _beqz a0, fast_syscall_sysxtensa
+ _beqi a0, __NR_xtensa, fast_syscall_xtensa
j user_exception
-ENTRY(fast_syscall_unrecoverable)
+ENDPROC(fast_syscall_user)
- /* Restore all states. */
+ENTRY(fast_syscall_unrecoverable)
- l32i a0, a2, PT_AREG0 # restore a0
- xsr a2, DEPC # restore a2, depc
- rsr a3, EXCSAVE_1
+ /* Restore all states. */
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, depc # restore a2, depc
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+ENDPROC(fast_syscall_unrecoverable)
/*
* sysxtensa syscall handler
*
- * int sysxtensa (XTENSA_ATOMIC_SET, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_ADD, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
- * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
- * a2 a6 a3 a4 a5
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
*
* Entry condition:
*
- * a0: trashed, original value saved on stack (PT_AREG0)
+ * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
* a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a2: new stack pointer, original in a0 and DEPC
+ * a3: a3
+ * a4..a15: unchanged
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1072,7 +976,7 @@ ENTRY(fast_syscall_unrecoverable)
* TRY adds an entry to the __ex_table fixup table for the immediately
* following instruction.
*
- * CATCH catches any exception that occurred at one of the preceeding TRY
+ * CATCH catches any exception that occurred at one of the preceding TRY
* statements and continues from there
*
* Usage TRY l32i a0, a1, 0
@@ -1091,60 +995,61 @@ ENTRY(fast_syscall_unrecoverable)
#define CATCH \
67:
-ENTRY(fast_syscall_sysxtensa)
-
- _beqz a6, 1f
- _blti a6, SYSXTENSA_COUNT, 2f
-
-1: j user_exception
-
-2: xsr a3, EXCSAVE_1 # restore a3, excsave1
- s32i a7, a2, PT_AREG7
+ENTRY(fast_syscall_xtensa)
+ s32i a7, a2, PT_AREG7 # we need an additional register
movi a7, 4 # sizeof(unsigned int)
- access_ok a0, a3, a7, a2, .Leac
+ access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
- _beqi a6, SYSXTENSA_ATOMIC_SET, .Lset
- _beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
- _beqi a6, SYSXTENSA_ATOMIC_ADD, .Ladd
+ addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
+ _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
- /* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */
+ /* Fall through for ATOMIC_CMP_SWP. */
.Lswp: /* Atomic compare and swap */
-TRY l32i a7, a3, 0 # read old value
- bne a7, a4, 1f # same as old value? jump
- s32i a5, a3, 0 # different, modify value
- movi a7, 1 # and return 1
- j .Lret
-
-1: movi a7, 0 # same values: return 0
- j .Lret
-
-.Ladd: /* Atomic add */
-.Lexg: /* Atomic (exchange) add */
+TRY l32i a0, a3, 0 # read old value
+ bne a0, a4, 1f # same as old value? jump
+TRY s32i a5, a3, 0 # different, modify value
+ l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 1 # and return 1
+ addi a6, a6, 1 # restore a6 (really necessary?)
+ rfe
-TRY l32i a7, a3, 0
- add a4, a4, a7
- s32i a4, a3, 0
- j .Lret
+1: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 0 # return 0 (note that we cannot set
+ addi a6, a6, 1 # restore a6 (really necessary?)
+ rfe
-.Lset: /* Atomic set */
+.Lnswp: /* Atomic set, add, and exg_add. */
-TRY l32i a7, a3, 0 # read old value as return value
- s32i a4, a3, 0 # write new value
+TRY l32i a7, a3, 0 # orig
+ add a0, a4, a7 # + arg
+ moveqz a0, a4, a6 # set
+TRY s32i a0, a3, 0 # write new value
-.Lret: mov a0, a2
+ mov a0, a2
mov a2, a7
- l32i a7, a0, PT_AREG7
- l32i a3, a0, PT_AREG3
- l32i a0, a0, PT_AREG0
+ l32i a7, a0, PT_AREG7 # restore a7
+ l32i a0, a0, PT_AREG0 # restore a0
+ addi a6, a6, 1 # restore a6 (really necessary?)
rfe
CATCH
-.Leac: movi a7, -EFAULT
- j .Lret
+.Leac: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EFAULT
+ rfe
+.Lill:	l32i    a7, a2, PT_AREG7	# restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EINVAL
+ rfe
+
+ENDPROC(fast_syscall_xtensa)
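Semantically, fast_syscall_xtensa implements small atomic operations on a
user-space word; atomicity appears to rely on the fast path running with
interrupts masked, and user faults on the TRY'd loads/stores land in .Leac
(-EFAULT) via the __ex_table fixup. A C sketch of the value semantics only;
the operation numbering is assumed from the "assuming SYS_XTENSA_ATOMIC_SET
= 1" comment above:

        #include <errno.h>

        enum {
                SYS_XTENSA_ATOMIC_SET = 1,      /* assumed numbering */
                SYS_XTENSA_ATOMIC_EXG_ADD,
                SYS_XTENSA_ATOMIC_ADD,
                SYS_XTENSA_ATOMIC_CMP_SWP,
                SYS_XTENSA_COUNT,
        };

        int sysxtensa_sketch(int op, unsigned int *ptr, unsigned int arg,
                             unsigned int newval)
        {
                unsigned int old = *ptr;        /* TRY'd l32i: may fault */

                switch (op) {
                case SYS_XTENSA_ATOMIC_SET:
                        *ptr = arg;             /* TRY'd s32i: may fault */
                        return old;
                case SYS_XTENSA_ATOMIC_EXG_ADD:
                case SYS_XTENSA_ATOMIC_ADD:
                        *ptr = old + arg;
                        return old;             /* both return the old value */
                case SYS_XTENSA_ATOMIC_CMP_SWP:
                        if (old != arg)
                                return 0;       /* no swap performed */
                        *ptr = newval;
                        return 1;               /* swapped */
                default:
                        return -EINVAL;         /* the .Lill path */
                }
        }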
/* fast_syscall_spill_registers.
@@ -1154,231 +1059,79 @@ CATCH
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- * Note: We don't need to save a2 in depc (return value)
*/
ENTRY(fast_syscall_spill_registers)
/* Register a FIXUP handler (pass current wb as a parameter) */
+ xsr a3, excsave1
movi a0, fast_syscall_spill_registers_fixup
s32i a0, a3, EXC_TABLE_FIXUP
- rsr a0, WINDOWBASE
+ rsr a0, windowbase
s32i a0, a3, EXC_TABLE_PARAM
+ xsr a3, excsave1 # restore a3 and excsave_1
- /* Save a3 and SAR on stack. */
+ /* Save a3, a4 and SAR on stack. */
- rsr a0, SAR
- xsr a3, EXCSAVE_1 # restore a3 and excsave_1
- s32i a0, a2, PT_AREG4 # store SAR to PT_AREG4
+ rsr a0, sar
s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_SAR
- /* The spill routine might clobber a7, a11, and a15. */
-
- s32i a7, a2, PT_AREG5
- s32i a11, a2, PT_AREG6
- s32i a15, a2, PT_AREG7
-
- call0 _spill_registers # destroys a3, DEPC, and SAR
-
- /* Advance PC, restore registers and SAR, and return from exception. */
-
- l32i a3, a2, PT_AREG4
- l32i a0, a2, PT_AREG0
- wsr a3, SAR
- l32i a3, a2, PT_AREG3
-
- /* Restore clobbered registers. */
-
- l32i a7, a2, PT_AREG5
- l32i a11, a2, PT_AREG6
- l32i a15, a2, PT_AREG7
-
- movi a2, 0
- rfe
-
-/* Fixup handler.
- *
- * We get here if the spill routine causes an exception, e.g. tlb miss.
- * We basically restore WINDOWBASE and WINDOWSTART to the condition when
- * we entered the spill routine and jump to the user exception handler.
- *
- * a0: value of depc, original value in depc
- * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
- * a3: exctable, original value in excsave1
- */
-
-fast_syscall_spill_registers_fixup:
-
- rsr a2, WINDOWBASE # get current windowbase (a2 is saved)
- xsr a0, DEPC # restore depc and a0
- ssl a2 # set shift (32 - WB)
-
- /* We need to make sure the current registers (a0-a3) are preserved.
- * To do this, we simply set the bit for the current window frame
- * in WS, so that the exception handlers save them to the task stack.
- */
-
- rsr a3, EXCSAVE_1 # get spill-mask
- slli a2, a3, 1 # shift left by one
-
- slli a3, a2, 32-WSBITS
- src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
- wsr a2, WINDOWSTART # set corrected windowstart
-
- movi a3, exc_table
- l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
- l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
-
- /* Return to the original (user task) WINDOWBASE.
- * We leave the following frame behind:
- * a0, a1, a2 same
- * a3: trashed (saved in excsave_1)
- * depc: depc (we have to return to that address)
- * excsave_1: a3
- */
-
- wsr a3, WINDOWBASE
- rsync
-
- /* We are now in the original frame when we entered _spill_registers:
- * a0: return address
- * a1: used, stack pointer
- * a2: kernel stack pointer
- * a3: available, saved in EXCSAVE_1
- * depc: exception address
- * excsave: a3
- * Note: This frame might be the same as above.
- */
-
-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
- /* Restore registers we precautiously saved.
- * We have the value of the 'right' a3
- */
-
- l32i a7, a2, PT_AREG5
- l32i a11, a2, PT_AREG6
- l32i a15, a2, PT_AREG7
-#endif
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
- /* Setup stack pointer. */
-
- addi a2, a2, -PT_USER_SIZE
- s32i a0, a2, PT_AREG0
-
- /* Make sure we return to this fixup handler. */
-
- movi a3, fast_syscall_spill_registers_fixup_return
- s32i a3, a2, PT_DEPC # setup depc
-
- /* Jump to the exception handler. */
-
- movi a3, exc_table
- rsr a0, EXCCAUSE
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- jx a0
-
-fast_syscall_spill_registers_fixup_return:
-
- /* When we return here, all registers have been restored (a2: DEPC) */
-
- wsr a2, DEPC # exception address
-
- /* Restore fixup handler. */
-
- xsr a3, EXCSAVE_1
- movi a2, fast_syscall_spill_registers_fixup
- s32i a2, a3, EXC_TABLE_FIXUP
- rsr a2, WINDOWBASE
- s32i a2, a3, EXC_TABLE_PARAM
- l32i a2, a3, EXC_TABLE_KSTK
-
-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
- /* Save registers again that might be clobbered. */
-
- s32i a7, a2, PT_AREG5
- s32i a11, a2, PT_AREG6
- s32i a15, a2, PT_AREG7
-#endif
-
- /* Load WB at the time the exception occurred. */
-
- rsr a3, SAR # WB is still in SAR
- neg a3, a3
- wsr a3, WINDOWBASE
- rsync
-
- /* Restore a3 and return. */
-
- movi a3, exc_table
- xsr a3, EXCSAVE_1
-
- rfde
-
-
-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- * - must be called with call0.
- * - uses DEPC, a3 and SAR.
- * - the last 'valid' register of each frame are clobbered.
- * - the caller must have registered a fixup handler
- * (or be inside a critical section)
- * - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
+ s32i a4, a2, PT_AREG4
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+ s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
+ s32i a15, a2, PT_AREG15
/*
* Rotate ws so that the current windowbase is at bit 0.
* Assume ws = xxxwww1yy (www1 current window frame).
- * Rotate ws right so that a2 = yyxxxwww1.
+ * Rotate ws right so that a4 = yyxxxwww1.
*/
- wsr a2, DEPC # preserve a2
- rsr a2, WINDOWBASE
- rsr a3, WINDOWSTART
- ssr a2 # holds WB
- slli a2, a3, WSBITS
- or a3, a3, a2 # a2 = xxxwww1yyxxxwww1yy
- srl a3, a3
+ rsr a0, windowbase
+ rsr a3, windowstart # a3 = xxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
+ srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
/* We are done if there are no frames other than the current one. */
- extui a3, a3, 1, WSBITS-2 # a3 = 0yyxxxwww
- movi a2, (1 << (WSBITS-1))
+ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
+ movi a0, (1 << (WSBITS-1))
_beqz a3, .Lnospill # only one active frame? jump
/* We want 1 at the top, so that we return to the current windowbase */
- or a3, a3, a2 # 1yyxxxwww
+ or a3, a3, a0 # 1yyxxxwww
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
- wsr a3, WINDOWSTART # save shifted windowstart
- neg a2, a3
- and a3, a2, a3 # first bit set from right: 000010000
+ wsr a3, windowstart # save shifted windowstart
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
- ffs_ws a2, a3 # a2: shifts to skip empty frames
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
movi a3, WSBITS
- sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right
- ssr a2 # save in SAR for later.
+ sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
+ ssr a0 # save in SAR for later.
- rsr a3, WINDOWBASE
- add a3, a3, a2
- rsr a2, DEPC # restore a2
- wsr a3, WINDOWBASE
+ rsr a3, windowbase
+ add a3, a3, a0
+ wsr a3, windowbase
rsync
- rsr a3, WINDOWSTART
+ rsr a3, windowstart
srl a3, a3 # shift windowstart
/* WB is now just one frame below the oldest frame in the register
@@ -1389,19 +1142,6 @@ ENTRY(_spill_registers)
* we have to save 4, 8, or 12 registers.
*/
- _bbsi.l a3, 1, .Lc4
- _bbsi.l a3, 2, .Lc8
-
- /* Special case: we have a call12-frame starting at a4. */
-
- _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
-
- s32e a4, a1, -16 # a1 is valid with an empty spill area
- l32e a4, a5, -12
- s32e a8, a4, -48
- mov a8, a4
- l32e a4, a1, -16
- j .Lc12c
.Lloop: _bbsi.l a3, 1, .Lc4
_bbci.l a3, 2, .Lc12
@@ -1415,22 +1155,10 @@ ENTRY(_spill_registers)
s32e a9, a4, -28
s32e a10, a4, -24
s32e a11, a4, -20
-
srli a11, a3, 2 # shift windowbase by 2
rotw 2
_bnei a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
- rotw 1
- rsr a3, WINDOWBASE
- ssl a3
- movi a3, 1
- sll a3, a3
- wsr a3, WINDOWSTART
-
-.Lnospill:
- jx a0
+ j .Lexit
.Lc4: s32e a4, a9, -16
s32e a5, a9, -12
@@ -1446,11 +1174,11 @@ ENTRY(_spill_registers)
/* 12-register frame (call12) */
- l32e a2, a5, -12
- s32e a8, a2, -48
- mov a8, a2
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
-.Lc12c: s32e a9, a8, -44
+ s32e a9, a8, -44
s32e a10, a8, -40
s32e a11, a8, -36
s32e a12, a8, -32
@@ -1470,61 +1198,209 @@ ENTRY(_spill_registers)
*/
rotw 1
- mov a5, a13
+ mov a4, a13
rotw -1
- s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
rotw 3
_beqi a3, 1, .Lexit
j .Lloop
-.Linvalid_mask:
+.Lexit:
- /* We get here because of an unrecoverable error in the window
- * registers. If we are in user space, we kill the application,
- * however, this condition is unrecoverable in kernel space.
- */
+ /* Done. Do the final rotation and set WS */
+
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
+
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_SAR
+ l32i a0, a2, PT_AREG0
+ wsr a3, sar
+ l32i a3, a2, PT_AREG3
+
+ /* Restore clobbered registers. */
+
+ l32i a4, a2, PT_AREG4
+ l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
+ l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
+ l32i a15, a2, PT_AREG15
- rsr a0, PS
- _bbci.l a0, PS_UM_SHIFT, 1f
+ movi a2, 0
+ rfe
+
+.Linvalid_mask:
- /* User space: Setup a dummy frame and kill application.
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
*/
movi a0, 1
movi a1, 0
- wsr a0, WINDOWSTART
- wsr a1, WINDOWBASE
+ wsr a0, windowstart
+ wsr a1, windowbase
rsync
movi a0, 0
- movi a3, exc_table
+ rsr a3, excsave1
l32i a1, a3, EXC_TABLE_KSTK
- wsr a3, EXCSAVE_1
- movi a4, PS_WOE_MASK | 1
- wsr a4, PS
+ movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a4, ps
rsync
movi a6, SIGSEGV
movi a4, do_exit
callx4 a4
-1: /* Kernel space: PANIC! */
+ /* shouldn't return, so panic */
- wsr a0, EXCSAVE_1
+ wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0 # should not return
1: j 1b
+
+ENDPROC(fast_syscall_spill_registers)
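
As a reading aid for the windowstart arithmetic above: the routine rotates WS right by WB so the current frame sits at bit 0, drops that bit, and counts trailing zeros to find how many empty frames must be skipped. A minimal C model, assuming WSBITS < 32 (illustrative only, not kernel API):

	static unsigned rotate_ws_right(unsigned ws, unsigned wb)
	{
		unsigned mask = (1u << WSBITS) - 1;

		return ((ws >> wb) | (ws << (WSBITS - wb))) & mask;
	}

	static unsigned empty_frames_to_skip(unsigned ws, unsigned wb)
	{
		/* current frame at bit 0; drop it, keep the older frames */
		unsigned rot = rotate_ws_right(ws, wb) >> 1;

		/* mark the return-to-current bit, as the code above does */
		rot |= 1u << (WSBITS - 1);

		/* trailing zeros == number of empty frames to skip */
		return __builtin_ctz(rot);
	}
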
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+ rsr a2, windowbase # get current windowbase (a2 is saved)
+ xsr a0, depc # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ *
+ * Note: we use a3 to set the windowbase, so we take special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+ wsr a2, windowstart # set corrected windowstart
+
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+ * depc: depc (we have to return to that address)
+ * excsave_1: exctable
+ */
+
+ wsr a3, windowbase
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available
+ * depc: exception address
+ * excsave: exctable
+ * Note: This frame might be the same as above.
+ */
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ rsr a3, excsave1
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ jx a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
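
The rotate-left described in the comment can be hard to see in the ssl/slli/src sequence; a hedged C equivalent (the function name and the WSBITS < 32 bound are assumptions):

	static unsigned fixup_windowstart(unsigned spill_mask, unsigned wb)
	{
		unsigned mask = (1u << WSBITS) - 1;
		/* bit 0 of spill_mask is the frame at windowbase - 1;
		 * make the current frame live again ... */
		unsigned ws = ((spill_mask << 1) | 1) & mask;

		/* ... then rotate left so the current frame lands on bit
		 * wb, i.e. spill_mask moves left by wb + 1 in total */
		return ((ws << wb) | (ws >> (WSBITS - wb))) & mask;
	}
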
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, depc # exception address
+
+ /* Restore fixup handler. */
+
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, sar # WB is still in SAR
+ neg a3, a3
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+ rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
+#ifdef CONFIG_MMU
/*
* We should never get here. Bail out!
*/
@@ -1535,6 +1411,8 @@ ENTRY(fast_second_level_miss_double_kernel)
callx0 a0 # should not return
1: j 1b
+ENDPROC(fast_second_level_miss_double_kernel)
+
/* First-level entry handler for user, kernel, and double 2nd-level
* TLB miss exceptions. Note that for now, user and kernel miss
* exceptions share the same entry point and are handled identically.
@@ -1547,9 +1425,9 @@ ENTRY(fast_second_level_miss_double_kernel)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1557,9 +1435,10 @@ ENTRY(fast_second_level_miss_double_kernel)
ENTRY(fast_second_level_miss)
- /* Save a1. Note: we don't expect a double exception. */
+ /* Save a1 and a3. Note: we don't expect a double exception. */
s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
/* We need to map the page of PTEs for the user task. Find
* the pointer to that page. Also, it's possible for tsk->mm
@@ -1581,10 +1460,9 @@ ENTRY(fast_second_level_miss)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
-8: rsr a1, EXCVADDR # fault address
- _PGD_OFFSET(a0, a1, a1)
+8: rsr a3, excvaddr # fault address
+ _PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
- //beqi a0, _PAGE_USER, 2f
beqz a0, 2f
/* Read ptevaddr and convert to top of page-table page.
@@ -1597,28 +1475,42 @@ ENTRY(fast_second_level_miss)
* The messy computation for 'pteval' above really simplifies
* into the following:
*
- * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
+ * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
*/
- movi a1, -PAGE_OFFSET
+ movi a1, (-PAGE_OFFSET) & 0xffffffff
add a0, a0, a1 # pmdval - PAGE_OFFSET
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1
-
- movi a1, PAGE_DIRECTORY
+ movi a1, _PAGE_DIRECTORY
or a0, a0, a1 # ... | PAGE_DIRECTORY
- rsr a1, PTEVADDR
+ /*
+ * We utilize all three wired-ways (7-9) to hold pmd translations.
+ * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+ * This allows us to map the three most common regions to three
+ * different DTLBs:
+ * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
+ * 2 -> way 8 shared libraries (2000.0000)
+ * 3 -> way 9 stack (3000.0000)
+ */
+
+ extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
+ rsr a1, ptevaddr
+ addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT
+ extui a3, a3, 2, 2 # -> 0,0,1,2
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
- addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number
+ addi a3, a3, DTLB_WAY_PGD
+ add a1, a1, a3 # ... + way_number
- wdtlb a0, a1
+3: wdtlb a0, a1
dsync
/* Exit critical section. */
+4: rsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
@@ -1626,38 +1518,104 @@ ENTRY(fast_second_level_miss)
l32i a0, a2, PT_AREG0
l32i a1, a2, PT_AREG1
+ l32i a3, a2, PT_AREG3
l32i a2, a2, PT_DEPC
- xsr a3, EXCSAVE_1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
/* Restore excsave1 and return. */
- rsr a2, DEPC
+ rsr a2, depc
rfe
/* Return from double exception. */
-1: xsr a2, DEPC
+1: xsr a2, depc
esync
rfde
9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
j 8b
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+2: /* Special case for cache aliasing.
+ * We (should) only get here if clear_user_page, copy_user_page,
+ * or one of the aliased cache flush functions was preempted by
+ * another task. Re-establish the temporary mapping to the
+ * TLBTEMP_BASE areas.
+ */
+
+ /* We shouldn't be in a double exception */
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+
+ /* Make sure the exception originated in the special functions */
+
+ movi a0, __tlbtemp_mapping_start
+ rsr a3, epc1
+ bltu a3, a0, 2f
+ movi a0, __tlbtemp_mapping_end
+ bgeu a3, a0, 2f
+
+ /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+ movi a3, TLBTEMP_BASE_1
+ rsr a0, excvaddr
+ bltu a0, a3, 2f
+
+ addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
+ bgeu a1, a3, 2f
+
+ /* Check if we have to restore an ITLB mapping. */
+
+ movi a1, __tlbtemp_mapping_itlb
+ rsr a3, epc1
+ sub a3, a3, a1
+
+ /* Calculate VPN */
+
+ movi a1, PAGE_MASK
+ and a1, a1, a0
+
+ /* Jump for ITLB entry */
+
+ bgez a3, 1f
+
+ /* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+ extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+ add a1, a3, a1
+
+ /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+ mov a0, a6
+ movnez a0, a7, a3
+ j 3b
+
+ /* ITLB entry. We only use dst in a6. */
+
+1: witlb a6, a1
+ isync
+ j 4b
+
+
+#endif // DCACHE_WAY_SIZE > PAGE_SIZE
+
+
2: /* Invalid PGD, default exception handling */
- rsr a1, DEPC
- xsr a3, EXCSAVE_1
+ rsr a1, depc
s32i a1, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
mov a1, a2
- rsr a2, PS
- bbsi.l a2, PS_UM_SHIFT, 1f
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
+ENDPROC(fast_second_level_miss)
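
The extui/addx2/extui sequence above implements a small lookup table; as a C sketch (assuming DTLB_WAY_PGD is the first of the wired ways, i.e. 7):

	static unsigned pgd_dtlb_way(unsigned long vaddr)
	{
		unsigned region = (vaddr >> 28) & 3;	/* address bits 28..29 */

		/* region * 3 = 0,3,6,9; >> 2 = 0,0,1,2 -> ways 7,7,8,9 */
		return DTLB_WAY_PGD + ((region * 3) >> 2);
	}
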
/*
* StoreProhibitedException
@@ -1669,9 +1627,9 @@ ENTRY(fast_second_level_miss)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1679,64 +1637,64 @@ ENTRY(fast_second_level_miss)
ENTRY(fast_store_prohibited)
- /* Save a1 and a4. */
+ /* Save a1 and a3. */
s32i a1, a2, PT_AREG1
- s32i a4, a2, PT_AREG4
+ s32i a3, a2, PT_AREG3
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
-8: rsr a1, EXCVADDR # fault address
- _PGD_OFFSET(a0, a1, a4)
+8: rsr a1, excvaddr # fault address
+ _PGD_OFFSET(a0, a1, a3)
l32i a0, a0, 0
- //beqi a0, _PAGE_USER, 2f # FIXME use _PAGE_INVALID
beqz a0, 2f
- _PTE_OFFSET(a0, a1, a4)
- l32i a4, a0, 0 # read pteval
- movi a1, _PAGE_VALID | _PAGE_RW
- bnall a4, a1, 2f
+ /*
+ * Note that we test _PAGE_WRITABLE_BIT only if the PTE is present
+ * and is not PAGE_NONE. See pgtable.h for the possible PTE layouts.
+ */
+
+ _PTE_OFFSET(a0, a1, a3)
+ l32i a3, a0, 0 # read pteval
+ movi a1, _PAGE_CA_INVALID
+ ball a3, a1, 2f
+ bbci.l a3, _PAGE_WRITABLE_BIT, 2f
- movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
- or a4, a4, a1
- rsr a1, EXCVADDR
- s32i a4, a0, 0
+ movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
+ or a3, a3, a1
+ rsr a1, excvaddr
+ s32i a3, a0, 0
/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
dhwb a0, 0
#endif
pdtlb a0, a1
- beqz a0, 1f
- idtlb a0 // FIXME do we need this?
- wdtlb a4, a0
-1:
+ wdtlb a3, a0
/* Exit critical section. */
movi a0, 0
+ rsr a3, excsave1
s32i a0, a3, EXC_TABLE_FIXUP
/* Restore the working registers, and return. */
- l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
l32i a1, a2, PT_AREG1
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_DEPC
- /* Restore excsave1 and a3. */
-
- xsr a3, EXCSAVE_1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
- rsr a2, DEPC
+ rsr a2, depc
rfe
/* Double exception. Restore FIXUP handler and return. */
-1: xsr a2, DEPC
+1: xsr a2, depc
esync
rfde
@@ -1745,166 +1703,111 @@ ENTRY(fast_store_prohibited)
2: /* If there was a problem, handle fault in C */
- rsr a4, DEPC # still holds a2
- xsr a3, EXCSAVE_1
- s32i a4, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
- l32i a4, a2, PT_AREG4
+ rsr a3, depc # still holds a2
+ s32i a3, a2, PT_AREG2
mov a1, a2
- rsr a2, PS
- bbsi.l a2, PS_UM_SHIFT, 1f
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
+ENDPROC(fast_store_prohibited)
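
For reference, the fast path above corresponds roughly to the following C logic. This is a sketch under the assumption that the _PAGE_* macros match pgtable.h; the real handler runs with exceptions masked and writes the DTLB directly:

	static int fast_store_fixup(pte_t *ptep)
	{
		unsigned long pteval = pte_val(*ptep);

		/* PTE absent or PAGE_NONE: all CA bits read as invalid */
		if ((pteval & _PAGE_CA_INVALID) == _PAGE_CA_INVALID)
			return -1;			/* go to C fault code */
		if (!(pteval & (1ul << _PAGE_WRITABLE_BIT)))
			return -1;

		pteval |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
		*ptep = __pte(pteval);
		/* ... then write back the cache line and update the DTLB */
		return 0;
	}
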
-#if XCHAL_EXTRA_SA_SIZE
-
-#warning fast_coprocessor untested
+#endif /* CONFIG_MMU */
/*
- * Entry condition:
+ * System Calls.
*
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: dispatch table
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ * void system_call (struct pt_regs* regs, int exccause)
+ * a2 a3
*/
-ENTRY(fast_coprocessor_double)
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
-
-ENTRY(fast_coprocessor)
-
- /* Fatal if we are in a double exception. */
-
- l32i a0, a2, PT_DEPC
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
-
- /* Save some registers a1, a3, a4, SAR */
-
- xsr a3, EXCSAVE_1
- s32i a3, a2, PT_AREG3
- rsr a3, SAR
- s32i a4, a2, PT_AREG4
- s32i a1, a2, PT_AREG1
- s32i a5, a1, PT_AREG5
- s32i a3, a2, PT_SAR
- mov a1, a2
-
- /* Currently, the HAL macros only guarantee saving a0 and a1.
- * These can and will be refined in the future, but for now,
- * just save the remaining registers of a2...a15.
- */
- s32i a6, a1, PT_AREG6
- s32i a7, a1, PT_AREG7
- s32i a8, a1, PT_AREG8
- s32i a9, a1, PT_AREG9
- s32i a10, a1, PT_AREG10
- s32i a11, a1, PT_AREG11
- s32i a12, a1, PT_AREG12
- s32i a13, a1, PT_AREG13
- s32i a14, a1, PT_AREG14
- s32i a15, a1, PT_AREG15
-
- /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
-
- rsr a0, EXCCAUSE
- addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
+ENTRY(system_call)
- /* Set corresponding CPENABLE bit */
+ entry a1, 32
- movi a4, 1
- ssl a3 # SAR: 32 - coprocessor_number
- rsr a5, CPENABLE
- sll a4, a4
- or a4, a5, a4
- wsr a4, CPENABLE
- rsync
- movi a5, coprocessor_info # list of owner and offset into cp_save
- addx8 a0, a4, a5 # entry for CP
+ /* regs->syscall = regs->areg[2] */
- bne a4, a5, .Lload # bit wasn't set before, cp not in use
+ l32i a3, a2, PT_AREG2
+ mov a6, a2
+ movi a4, do_syscall_trace_enter
+ s32i a3, a2, PT_SYSCALL
+ callx4 a4
- /* Now compare the current task with the owner of the coprocessor.
- * If they are the same, there is no reason to save or restore any
- * coprocessor state. Having already enabled the coprocessor,
- * branch ahead to return.
- */
- GET_CURRENT(a5,a1)
- l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
- beq a4, a5, .Ldone
+ /* syscall = sys_call_table[syscall_nr] */
- /* Find location to dump current coprocessor state:
- * task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
- *
- * Note: a0 pointer to the entry in the coprocessor owner table,
- * a3 coprocessor number,
- * a4 current owner of coprocessor.
- */
- l32i a5, a0, COPROCESSOR_INFO_OFFSET
- addi a2, a4, THREAD_CP_SAVE
- add a2, a2, a5
+ movi a4, sys_call_table;
+ movi a5, __NR_syscall_count
+ movi a6, -ENOSYS
+ bgeu a3, a5, 1f
- /* Store current coprocessor states. (a5 still has CP number) */
+ addx4 a4, a3, a4
+ l32i a4, a4, 0
+ movi a5, sys_ni_syscall;
+ beq a4, a5, 1f
- xchal_cpi_store_funcbody
+ /* Load args: arg0 - arg5 are passed via regs. */
- /* The macro might have destroyed a3 (coprocessor number), but
- * SAR still has 32 - coprocessor_number!
- */
- movi a3, 32
- rsr a4, SAR
- sub a3, a3, a4
+ l32i a6, a2, PT_AREG6
+ l32i a7, a2, PT_AREG3
+ l32i a8, a2, PT_AREG4
+ l32i a9, a2, PT_AREG5
+ l32i a10, a2, PT_AREG8
+ l32i a11, a2, PT_AREG9
-.Lload: /* A new task now owns the corpocessors. Save its TCB pointer into
- * the coprocessor owner table.
- *
- * Note: a0 pointer to the entry in the coprocessor owner table,
- * a3 coprocessor number.
- */
- GET_CURRENT(a4,a1)
- s32i a4, a0, 0
+ /* Pass one additional argument to the syscall: pt_regs (on stack) */
+ s32i a2, a1, 0
- /* Find location from where to restore the current coprocessor state.*/
+ callx4 a4
- l32i a5, a0, COPROCESSOR_INFO_OFFSET
- addi a2, a4, THREAD_CP_SAVE
- add a2, a2, a4
+1: /* regs->areg[2] = return_value */
- xchal_cpi_load_funcbody
+ s32i a6, a2, PT_AREG2
+ movi a4, do_syscall_trace_leave
+ mov a6, a2
+ callx4 a4
+ retw
- /* We must assume that the xchal_cpi_store_funcbody macro destroyed
- * registers a2..a15.
- */
+ENDPROC(system_call)
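
In C terms, the dispatch above behaves like the sketch below (illustrative; the argument order follows the l32i sequence, and the table and prototype declarations are assumptions):

	typedef long (*syscall_t)(long, long, long, long, long, long);

	static long dispatch(struct pt_regs *regs, unsigned long nr)
	{
		long ret = -ENOSYS;

		if (nr < __NR_syscall_count) {
			syscall_t fn = (syscall_t)sys_call_table[nr];

			if (fn != (syscall_t)sys_ni_syscall)
				ret = fn(regs->areg[6], regs->areg[3],
					 regs->areg[4], regs->areg[5],
					 regs->areg[8], regs->areg[9]);
		}
		return ret;		/* written back to regs->areg[2] */
	}
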
-.Ldone: l32i a15, a1, PT_AREG15
- l32i a14, a1, PT_AREG14
- l32i a13, a1, PT_AREG13
- l32i a12, a1, PT_AREG12
- l32i a11, a1, PT_AREG11
- l32i a10, a1, PT_AREG10
- l32i a9, a1, PT_AREG9
- l32i a8, a1, PT_AREG8
- l32i a7, a1, PT_AREG7
- l32i a6, a1, PT_AREG6
- l32i a5, a1, PT_AREG5
- l32i a4, a1, PT_AREG4
- l32i a3, a1, PT_AREG3
- l32i a2, a1, PT_AREG2
- l32i a0, a1, PT_AREG0
- l32i a1, a1, PT_AREG1
-
- rfe
+/*
+ * Macro to spill live registers onto the kernel stack.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has a single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
-#endif /* XCHAL_EXTRA_SA_SIZE */
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
/*
* Task switch.
@@ -1917,20 +1820,35 @@ ENTRY(_switch_to)
entry a1, 16
- mov a4, a3 # preserve a3
+ mov a10, a2 # preserve 'prev' (a2)
+ mov a11, a3 # and 'next' (a3)
- s32i a0, a2, THREAD_RA # save return address
- s32i a1, a2, THREAD_SP # save stack pointer
+ l32i a4, a2, TASK_THREAD_INFO
+ l32i a5, a3, TASK_THREAD_INFO
- /* Disable ints while we manipulate the stack pointer; spill regs. */
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- movi a5, PS_EXCM_MASK | LOCKLEVEL
- xsr a5, PS
- rsr a3, EXCSAVE_1
+ s32i a0, a10, THREAD_RA # save return address
+ s32i a1, a10, THREAD_SP # save stack pointer
+
+ /* Disable ints while we manipulate the stack pointer. */
+
+ rsil a14, LOCKLEVEL
+ rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
- call0 _spill_registers
+ /* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ l32i a3, a5, THREAD_CPENABLE
+ xsr a3, cpenable
+ s32i a3, a4, THREAD_CPENABLE
+#endif
+
+ /* Flush register file. */
+
+ spill_registers_kernel
/* Set kernel stack (and leave critical section)
* Note: It's safe to set it here. The stack will not be overwritten
@@ -1938,23 +1856,26 @@ ENTRY(_switch_to)
* we return from kernel space.
*/
- l32i a0, a4, TASK_THREAD_INFO
- rsr a3, EXCSAVE_1 # exc_table
- movi a1, 0
- addi a0, a0, PT_REGS_OFFSET
- s32i a1, a3, EXC_TABLE_FIXUP
- s32i a0, a3, EXC_TABLE_KSTK
+ rsr a3, excsave1 # exc_table
+ movi a6, 0
+ addi a7, a5, PT_REGS_OFFSET
+ s32i a6, a3, EXC_TABLE_FIXUP
+ s32i a7, a3, EXC_TABLE_KSTK
+
+ /* restore context of the task 'next' */
- /* restore context of the task that 'next' addresses */
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
- l32i a0, a4, THREAD_RA /* restore return address */
- l32i a1, a4, THREAD_SP /* restore stack pointer */
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- wsr a5, PS
+ wsr a14, ps
+ mov a2, a10 # return 'prev'
rsync
retw
+ENDPROC(_switch_to)
ENTRY(ret_from_fork)
@@ -1964,33 +1885,24 @@ ENTRY(ret_from_fork)
movi a4, schedule_tail
callx4 a4
- movi a4, do_syscall_trace
+ movi a4, do_syscall_trace_leave
+ mov a6, a1
callx4 a4
j common_exception_return
-
+ENDPROC(ret_from_fork)
/*
- * Table of syscalls
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ * left from _switch_to: a6 = prev
*/
+ENTRY(ret_from_kernel_thread)
-.data
-.align 4
-.global sys_call_table
-sys_call_table:
-
-#define SYSCALL(call, narg) .word call
-#include "syscalls.h"
-
-/*
- * Number of arguments of each syscall
- */
-
-.global sys_narg_table
-sys_narg_table:
-
-#undef SYSCALL
-#define SYSCALL(call, narg) .byte narg
-#include "syscalls.h"
+ call4 schedule_tail
+ mov a6, a3
+ callx4 a2
+ j common_exception_return
+ENDPROC(ret_from_kernel_thread)
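
The a2/a3 convention above is established on the C side when the child is created; a hypothetical copy_thread() fragment (the field names and the return-address encoding are assumptions, the real code also folds window-call bits into the RA):

	if (p->flags & PF_KTHREAD) {
		childregs->areg[2] = (unsigned long)thread_fn;	   /* a2 */
		childregs->areg[3] = (unsigned long)thread_fn_arg; /* a3 */
		/* resume at ret_from_kernel_thread after _switch_to() */
		p->thread.ra = (unsigned long)ret_from_kernel_thread;
	}
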
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index c07cb252299..aeeb3cc8a41 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -15,9 +15,14 @@
* Kevin Chea
*/
-#include <xtensa/cacheasm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
/*
* This module contains the entry code for kernel images. It performs the
@@ -32,13 +37,6 @@
*
*/
- .macro iterate from, to , cmd
- .ifeq ((\to - \from) & ~0xfff)
- \cmd \from
- iterate "(\from+1)", \to, \cmd
- .endif
- .endm
-
/*
* _start
*
@@ -50,35 +48,66 @@
* instruction.
*/
- .section .head.text, "ax"
- .globl _start
-_start: _j 2f
- .align 4
-1: .word _startup
-2: l32r a0, 1b
- jx a0
-
- .text
- .align 4
-_startup:
+ __HEAD
+ .begin no-absolute-literals
- /* Disable interrupts and exceptions. */
-
- movi a0, XCHAL_PS_EXCM_MASK
- wsr a0, PS
+ENTRY(_start)
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupOCD
- wsr a2, EXCSAVE_1
-
- /* Start with a fresh windowbase and windowstart. */
+ .align 4
+ .literal_position
+.Lstartup:
+ .word _startup
+ .align 4
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
movi a1, 1
movi a0, 0
- wsr a1, WINDOWSTART
- wsr a0, WINDOWBASE
+ wsr a1, windowstart
+ wsr a0, windowbase
rsync
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ rsr a2, excsave1
+ movi a3, 0x08000000
+ bgeu a2, a3, 1f
+ movi a3, 0xd0000000
+ add a2, a2, a3
+ wsr a2, excsave1
+1:
+#endif
+#endif
+ .end no-absolute-literals
+
+ l32r a0, .Lstartup
+ jx a0
+
+ENDPROC(_start)
+
+ __REF
+ .literal_position
+
+ENTRY(_startup)
+
/* Set a0 to 0 for the remaining initialization. */
movi a0, 0
@@ -86,65 +115,97 @@ _startup:
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
- wsr a0, IBREAKENABLE
- wsr a0, ICOUNT
+#if XCHAL_NUM_IBREAK > 0
+ wsr a0, ibreakenable
+#endif
+ wsr a0, icount
movi a1, 15
- wsr a0, ICOUNTLEVEL
-
- .macro reset_dbreak num
- wsr a0, DBREAKC + \num
- .endm
+ wsr a0, icountlevel
- iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK - 1
+ wsr a0, SREG_DBREAKC + _index
+ .set _index, _index + 1
+ .endr
#endif
/* Clear CCOUNT (not really necessary, but nice) */
- wsr a0, CCOUNT # not really necessary, but nice
+ wsr a0, ccount # not really necessary, but nice
/* Disable zero-loops. */
#if XCHAL_HAVE_LOOPS
- wsr a0, LCOUNT
+ wsr a0, lcount
#endif
/* Disable all timers. */
- .macro reset_timer num
- wsr a0, CCOMPARE_0 + \num
- .endm
- iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
+ .set _index, 0
+ .rept XCHAL_NUM_TIMERS
+ wsr a0, SREG_CCOMPARE + _index
+ .set _index, _index + 1
+ .endr
/* Interrupt initialization. */
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
- wsr a0, INTENABLE
- wsr a2, INTCLEAR
+ wsr a0, intenable
+ wsr a2, intclear
/* Disable coprocessors. */
-#if XCHAL_CP_NUM > 0
- wsr a0, CPENABLE
+#if XCHAL_HAVE_CP
+ wsr a0, cpenable
#endif
- /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
- *
- * Note: PS.EXCM must be cleared before using any loop
- * instructions; otherwise, they are silently disabled, and
- * at most one iteration of the loop is executed.
+ /* Initialize the caches.
+ * a2, a3 are just working registers (clobbered).
*/
- movi a1, 1
- wsr a1, PS
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ ___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ ___unlock_icache_all a2 a3
+#endif
+
+ ___invalidate_dcache_all a2 a3
+ ___invalidate_icache_all a2 a3
+
+ isync
+
+#ifdef CONFIG_HAVE_SMP
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 1
+ wer a3, a2
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+
+ movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+ # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+ wsr a2, ps # (enable reg-windows; progmode stack)
rsync
- /* Initialize the caches.
- * Does not include flushing writeback d-cache.
- * a6, a7 are just working registers (clobbered).
+ /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+ movi a2, debug_exception
+ wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+
+#ifdef CONFIG_SMP
+ /*
+ * Note that with SMP we assume that all cores implement the
+ * PRID register.
*/
+ rsr a2, prid
+ bnez a2, .Lboot_secondary
- icache_reset a2, a3
- dcache_reset a2, a3
+#endif /* CONFIG_SMP */
/* Unpack data sections
*
@@ -178,12 +239,12 @@ _startup:
* Now clear the BSS segment.
*/
- movi a2, _bss_start # start of BSS
- movi a3, _bss_end # end of BSS
+ movi a2, __bss_start # start of BSS
+ movi a3, __bss_stop # end of BSS
-1: addi a2, a2, 4
+ __loopt a2, a3, a4, 2
s32i a0, a2, 0
- blt a2, a3, 1b
+ __endla a2, a4, 4
#if XCHAL_DCACHE_IS_WRITEBACK
@@ -191,27 +252,15 @@ _startup:
* instructions/data are available.
*/
- dcache_writeback_all a2, a3
+ ___flush_dcache_all a2 a3
#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
- /* Setup stack and enable window exceptions (keep irqs disabled) */
-
- movi a1, init_thread_union
- addi a1, a1, KERNEL_STACK_SIZE
-
- movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
- wsr a2, PS # (enable reg-windows; progmode stack)
- rsync
-
- /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
-
- movi a2, debug_exception
- wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
-
- /* Set up EXCSAVE[1] to point to the exc_table. */
-
- movi a6, exc_table
- xsr a6, EXCSAVE_1
+ movi a6, 0
+ xsr a6, excsave1
/* init_arch kick-starts the linux kernel */
@@ -224,13 +273,103 @@ _startup:
should_never_return:
j should_never_return
- /* Define some common data structures here. We define them
- * here in this assembly file due to their unusual alignment
- * requirements.
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+ movi a2, cpu_start_ccount
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+
+ movi a6, 0
+ wsr a6, excsave1
+
+ movi a4, secondary_start_kernel
+ callx4 a4
+ j should_never_return
+
+#endif /* CONFIG_SMP */
+
+ENDPROC(_startup)
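
The two polling loops in .Lboot_secondary form a double handshake with the boot CPU; modeled in C (illustrative; set_ccount() stands in for the WSR.CCOUNT above):

	volatile u32 *p = &cpu_start_ccount;

	while (*p == 0)
		;		/* wait for the boot CPU to write a value */
	*p = 0;			/* acknowledge */

	while (*p == 0)
		;		/* wait for the actual CCOUNT sample */
	set_ccount(*p);		/* synchronize this core's cycle counter */
	*p = 0;			/* acknowledge again */
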
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ ___flush_invalidate_dcache_all a2 a3
+#else
+ ___invalidate_dcache_all a2 a3
+#endif
+ memw
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 0
+ wer a3, a2
+ extw
+
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
+ s32i a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+#endif
+1:
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
*/
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
- .comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
- .comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
- .comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
- .comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
+ j _startup
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+ .section ".data.init.refok"
+ .align 4
+ENTRY(start_info)
+ .long init_thread_union + KERNEL_STACK_SIZE
+
+/*
+ * BSS section
+ */
+
+__PAGE_ALIGNED_BSS
+#ifdef CONFIG_MMU
+ENTRY(swapper_pg_dir)
+ .fill PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
+#endif
+ENTRY(empty_zero_page)
+ .fill PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 1cf744ee095..3eee94f621e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
* Xtensa built-in interrupt controller and some generic functions copied
* from i386.
*
- * Copyright (C) 2002 - 2005 Tensilica, Inc.
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
@@ -18,37 +18,26 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <asm/mxregs.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
-static void enable_xtensa_irq(unsigned int irq);
-static void disable_xtensa_irq(unsigned int irq);
-static void mask_and_ack_xtensa(unsigned int irq);
-static void end_xtensa_irq(unsigned int irq);
-
-static unsigned int cached_irq_mask;
-
atomic_t irq_err_count;
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
- printk("unexpected IRQ trap at vector %02x\n", irq);
-}
+ int irq = irq_find_mapping(NULL, hwirq);
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-
-unsigned int do_IRQ(int irq, struct pt_regs *regs)
-{
- irq_enter();
+ if (hwirq >= NR_IRQS) {
+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+ __func__, hwirq);
+ }
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
@@ -63,127 +52,137 @@ unsigned int do_IRQ(int irq, struct pt_regs *regs)
sp - sizeof(struct thread_info));
}
#endif
-
- __do_IRQ(irq, regs);
-
- irq_exit();
-
- return 1;
+ generic_handle_irq(irq);
}
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_printf(p, "NMI: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", nmi_count(j));
- seq_putc(p, '\n');
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- }
+ seq_printf(p, "%*s: ", prec, "ERR");
+ seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
-/* shutdown is same as "disable" */
-#define shutdown_xtensa_irq disable_xtensa_irq
-static unsigned int startup_xtensa_irq(unsigned int irq)
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
- enable_xtensa_irq(irq);
- return 0; /* never anything pending */
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ int_irq = xtensa_map_ext_irq(ext_irq);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = int_irq;
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
}
-static struct hw_interrupt_type xtensa_irq_type = {
- "Xtensa-IRQ",
- startup_xtensa_irq,
- shutdown_xtensa_irq,
- enable_xtensa_irq,
- disable_xtensa_irq,
- mask_and_ack_xtensa,
- end_xtensa_irq
-};
-
-static inline void mask_irq(unsigned int irq)
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
{
- cached_irq_mask &= ~(1 << irq);
- set_sr (cached_irq_mask, INTENABLE);
+ struct irq_chip *irq_chip = d->host_data;
+ u32 mask = 1 << hw;
+
+ if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_simple_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "timer");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+ /* XCHAL_INTTYPE_MASK_NMI */
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
}
-static inline void unmask_irq(unsigned int irq)
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
{
- cached_irq_mask |= 1 << irq;
- set_sr (cached_irq_mask, INTENABLE);
-}
+ unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+ unsigned i;
-static void disable_xtensa_irq(unsigned int irq)
-{
- unsigned long flags;
- local_save_flags(flags);
- mask_irq(irq);
- local_irq_restore(flags);
+ for (i = 0; mask; ++i, mask >>= 1) {
+ if ((mask & 1) && ext_irq-- == 0)
+ return i;
+ }
+ return XCHAL_NUM_INTERRUPTS;
}
-static void enable_xtensa_irq(unsigned int irq)
+unsigned xtensa_get_ext_irq_no(unsigned irq)
{
- unsigned long flags;
- local_save_flags(flags);
- unmask_irq(irq);
- local_irq_restore(flags);
+ unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+ ((1u << irq) - 1);
+ return hweight32(mask);
}
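
xtensa_map_ext_irq() and xtensa_get_ext_irq_no() are inverses over the external interrupts, which a quick self-check can demonstrate (illustrative only, not part of the patch):

	static void __init check_ext_irq_map(void)
	{
		unsigned n, hw;

		for (n = 0; (hw = xtensa_map_ext_irq(n)) < XCHAL_NUM_INTERRUPTS; ++n)
			WARN_ON(xtensa_get_ext_irq_no(hw) != n);
	}
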
-static void mask_and_ack_xtensa(unsigned int irq)
+void __init init_IRQ(void)
{
- disable_xtensa_irq(irq);
+#ifdef CONFIG_OF
+ irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+ xtensa_mx_init_legacy(NULL);
+#else
+ xtensa_pic_init_legacy(NULL);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+ ipi_init();
+#endif
+ variant_init_irq();
}
-static void end_xtensa_irq(unsigned int irq)
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
{
- enable_xtensa_irq(irq);
-}
+ unsigned int i, cpu = smp_processor_id();
+ for_each_active_irq(i) {
+ struct irq_data *data = irq_get_irq_data(i);
+ unsigned int newcpu;
-void __init init_IRQ(void)
-{
- int i;
+ if (irqd_is_per_cpu(data))
+ continue;
- for (i=0; i < XTENSA_NR_IRQS; i++)
- irq_desc[i].chip = &xtensa_irq_type;
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ continue;
- cached_irq_mask = 0;
+ newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
- platform_init_irq();
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
+
+ cpumask_setall(data->affinity);
+ }
+ irq_set_affinity(i, data->affinity);
+ }
}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S
new file mode 100644
index 00000000000..0eeda2e4a25
--- /dev/null
+++ b/arch/xtensa/kernel/mcount.S
@@ -0,0 +1,50 @@
+/*
+ * arch/xtensa/kernel/mcount.S
+ *
+ * Xtensa specific mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+/*
+ * Entry condition:
+ *
+ * a2: a0 of the caller
+ */
+
+ENTRY(_mcount)
+
+ entry a1, 16
+
+ movi a4, ftrace_trace_function
+ l32i a4, a4, 0
+ movi a3, ftrace_stub
+ bne a3, a4, 1f
+ retw
+
+1: xor a7, a2, a1
+ movi a3, 0x3fffffff
+ and a7, a7, a3
+ xor a7, a7, a1
+
+ xor a6, a0, a1
+ and a6, a6, a3
+ xor a6, a6, a1
+ addi a6, a6, -MCOUNT_INSN_SIZE
+ callx4 a4
+
+ retw
+
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+ entry a1, 16
+ retw
+ENDPROC(ftrace_stub)
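
The xor/and/xor sequence in _mcount recovers a real PC from a windowed return address; in C (a sketch of what the kernel's MAKE_PC_FROM_RA macro computes):

	static unsigned long make_pc_from_ra(unsigned long ra, unsigned long sp)
	{
		/* low 30 bits from the return address (the top two encode
		 * the caller's window size), top 2 bits from the stack
		 * pointer, assumed to live in the same 1 GB region */
		return ((ra ^ sp) & 0x3fffffff) ^ sp;
	}
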
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index d1683cfa19a..b715237bae6 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -1,5 +1,5 @@
/*
- * arch/xtensa/kernel/platform.c
+ * arch/xtensa/kernel/module.c
*
* Module support.
*
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -22,57 +22,171 @@
#include <linux/kernel.h>
#include <linux/cache.h>
-LIST_HEAD(module_buf_list);
+#undef DEBUG_RELOCATE
-void *module_alloc(unsigned long size)
+static int
+decode_calln_opcode (unsigned char *location)
{
- panic("module_alloc not implemented");
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x5;
+#endif
}
-void module_free(struct module *mod, void *module_region)
+static int
+decode_l32r_opcode (unsigned char *location)
{
- panic("module_free not implemented");
-}
-
-int module_frob_arch_sections(Elf32_Ehdr *hdr,
- Elf32_Shdr *sechdrs,
- char *secstrings,
- struct module *me)
-{
- panic("module_frob_arch_sections not implemented");
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- panic ("apply_relocate not implemented");
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x1;
+#endif
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
- struct module *module)
+ struct module *mod)
{
- panic("apply_relocate_add not implemented");
-}
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ unsigned char *location;
+ uint32_t value;
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- panic ("module_finalize not implemented");
-}
+#ifdef DEBUG_RELOCATE
+ printk("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+#endif
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset;
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info);
+ value = sym->st_value + rela[i].r_addend;
-void module_arch_cleanup(struct module *mod)
-{
- panic("module_arch_cleanup not implemented");
-}
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_XTENSA_NONE:
+ case R_XTENSA_DIFF8:
+ case R_XTENSA_DIFF16:
+ case R_XTENSA_DIFF32:
+ case R_XTENSA_ASM_EXPAND:
+ break;
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
- panic("module_find_bug not implemented");
+ case R_XTENSA_32:
+ case R_XTENSA_PLT:
+ *(uint32_t *)location += value;
+ break;
+
+ case R_XTENSA_SLOT0_OP:
+ if (decode_calln_opcode(location)) {
+ value -= ((unsigned long)location & -4) + 4;
+ if ((value & 3) != 0 ||
+ ((value + (1 << 19)) >> 20) != 0) {
+ printk("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+ location[0] = ((location[0] & ~0x3) |
+ ((value >> 16) & 0x3));
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[0] = ((location[0] & ~0xc0) |
+ ((value << 6) & 0xc0));
+ location[1] = (value >> 2) & 0xff;
+ location[2] = (value >> 10) & 0xff;
+#endif
+ } else if (decode_l32r_opcode(location)) {
+ value -= (((unsigned long)location + 3) & -4);
+ if ((value & 3) != 0 ||
+ (signed int)value >> 18 != -1) {
+ printk("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[1] = value & 0xff;
+ location[2] = (value >> 8) & 0xff;
+#endif
+ }
+ /* FIXME: Ignore any other opcodes. The Xtensa
+ assembler currently assumes that the linker will
+ always do relaxation and so all PC-relative
+ operands need relocations. (The assembler also
+ writes out the tentative PC-relative values,
+ assuming no link-time relaxation, so it is usually
+ safe to ignore the relocations.) If the
+ assembler's "--no-link-relax" flag can be made to
+ work, and if all kernel modules can be assembled
+ with that flag, then unexpected relocations could
+ be detected here. */
+ break;
+
+ case R_XTENSA_SLOT1_OP:
+ case R_XTENSA_SLOT2_OP:
+ case R_XTENSA_SLOT3_OP:
+ case R_XTENSA_SLOT4_OP:
+ case R_XTENSA_SLOT5_OP:
+ case R_XTENSA_SLOT6_OP:
+ case R_XTENSA_SLOT7_OP:
+ case R_XTENSA_SLOT8_OP:
+ case R_XTENSA_SLOT9_OP:
+ case R_XTENSA_SLOT10_OP:
+ case R_XTENSA_SLOT11_OP:
+ case R_XTENSA_SLOT12_OP:
+ case R_XTENSA_SLOT13_OP:
+ case R_XTENSA_SLOT14_OP:
+ printk("%s: unexpected FLIX relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ case R_XTENSA_SLOT0_ALT:
+ case R_XTENSA_SLOT1_ALT:
+ case R_XTENSA_SLOT2_ALT:
+ case R_XTENSA_SLOT3_ALT:
+ case R_XTENSA_SLOT4_ALT:
+ case R_XTENSA_SLOT5_ALT:
+ case R_XTENSA_SLOT6_ALT:
+ case R_XTENSA_SLOT7_ALT:
+ case R_XTENSA_SLOT8_ALT:
+ case R_XTENSA_SLOT9_ALT:
+ case R_XTENSA_SLOT10_ALT:
+ case R_XTENSA_SLOT11_ALT:
+ case R_XTENSA_SLOT12_ALT:
+ case R_XTENSA_SLOT13_ALT:
+ case R_XTENSA_SLOT14_ALT:
+ printk("%s: unexpected ALT relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ default:
+ printk("%s: unexpected relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
}
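
The two range checks in apply_relocate_add() reduce to the following predicates (an illustrative restatement, where v is the already-adjusted byte offset):

	static int calln_offset_ok(long v)
	{
		/* word-aligned, signed 20-bit range */
		return (v & 3) == 0 && v >= -(1L << 19) && v < (1L << 19);
	}

	static int l32r_offset_ok(long v)
	{
		/* word-aligned, within the 256 kB window below the insn */
		return (v & 3) == 0 && v >= -(1L << 18) && v < 0;
	}
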
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 00000000000..77a161a112c
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,85 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+ .section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+ _j _SetupOCD
+
+ .begin no-absolute-literals
+ .literal_position
+
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+_SetupMMU:
+ Offset = _SetupMMU - _SecondaryResetVector
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ /*
+ * Start Secondary Processors with NULL pointer to boot params.
+ */
+ movi a2, 0 # a2 == NULL
+ movi a3, _startup
+ jx a3
+
+ .end no-absolute-literals
+
+
+ .section .SecondaryResetVector.remapped_text, "ax"
+ .global _RemappedSecondaryResetVector
+
+ .org 0 # Need to do org before literals
+
+_RemappedSecondaryResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+ . = _RemappedSecondaryResetVector + Offset
+
+_RemappedSetupMMU:
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1ff82268e8e..2d9cc6dbfd7 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,5 +1,5 @@
/*
- * arch/xtensa/pci-dma.c
+ * arch/xtensa/kernel/pci-dma.c
*
* DMA coherent memory allocation.
*
@@ -20,6 +20,8 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
@@ -29,29 +31,51 @@
*/
void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
{
- void *ret;
+ unsigned long ret;
+ unsigned long uncached = 0;
/* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
- if (dev == NULL || (*dev->dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, get_order(size));
+ flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
- if (ret != NULL) {
- memset(ret, 0, size);
- *handle = virt_to_bus(ret);
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ flag |= GFP_DMA;
+ ret = (unsigned long)__get_free_pages(flag, get_order(size));
+
+ if (ret == 0)
+ return NULL;
+
+ /* We currently don't support coherent memory outside KSEG */
+
+ if (ret < XCHAL_KSEG_CACHED_VADDR
+ || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
+ BUG();
+
+
+ if (ret != 0) {
+ memset((void*) ret, 0, size);
+ uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
+ *handle = virt_to_bus((void*)ret);
+ __flush_invalidate_dcache_range(ret, size);
}
- return (void*) BYPASS_ADDR((unsigned long)ret);
+
+ return (void*)uncached;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
+ long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
+
+ if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
+ BUG();
+
+ free_pages(addr, get_order(size));
}
+EXPORT_SYMBOL(dma_free_coherent);
void consistent_sync(void *vaddr, size_t size, int direction)
@@ -71,3 +95,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
break;
}
}
+EXPORT_SYMBOL(consistent_sync);
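
A typical caller of the interface above looks like this (generic sketch, not part of the patch): the returned pointer is the uncached KSEG alias, while *handle carries the bus address for the device.

	static int example_alloc(struct device *dev)
	{
		dma_addr_t handle;
		void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle,
					       GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* program `handle` into the device; the CPU uses `buf` */
		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
		return 0;
	}
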
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 8709f8249d0..5b3403388d7 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -1,5 +1,5 @@
/*
- * arch/xtensa/pcibios.c
+ * arch/xtensa/kernel/pci.c
*
* PCI bios-type initialisation for PCI machines
*
@@ -46,7 +46,6 @@
* pcibios_fixups
* pcibios_align_resource
* pcibios_fixup_bus
- * pcibios_setup
* pci_bus_add_device
* pci_mmap_page_range
*/
@@ -69,26 +68,25 @@ static int pci_bus_count;
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff.
*/
-void
-pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
- resource_size_t align)
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
+ resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
- resource_size_t start = res->start;
-
if (size > 0x100) {
- printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%ld bytes)\n", pci_name(dev),
- dev->resource - res, size);
+ pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
+ pci_name(dev), dev->resource - res,
+ size);
}
- if (start & 0x300) {
+ if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
- res->start = start;
- }
}
+
+ return start;
}
int
@@ -135,32 +133,60 @@ struct pci_controller * __init pcibios_alloc_controller(void)
return pci_ctrl;
}
+static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
+ struct list_head *resources)
+{
+ struct resource *res;
+ unsigned long io_offset;
+ int i;
+
+ io_offset = (unsigned long)pci_ctrl->io_space.base;
+ res = &pci_ctrl->io_resource;
+ if (!res->flags) {
+ if (io_offset)
+ printk (KERN_ERR "I/O resource not set for host"
+ " bridge %d\n", pci_ctrl->index);
+ res->start = 0;
+ res->end = IO_SPACE_LIMIT;
+ res->flags = IORESOURCE_IO;
+ }
+ res->start += io_offset;
+ res->end += io_offset;
+ pci_add_resource_offset(resources, res, io_offset);
+
+ for (i = 0; i < 3; i++) {
+ res = &pci_ctrl->mem_resources[i];
+ if (!res->flags) {
+ if (i > 0)
+ continue;
+ printk(KERN_ERR "Memory resource not set for "
+ "host bridge %d\n", pci_ctrl->index);
+ res->start = 0;
+ res->end = ~0U;
+ res->flags = IORESOURCE_MEM;
+ }
+ pci_add_resource(resources, res);
+ }
+}
+
static int __init pcibios_init(void)
{
struct pci_controller *pci_ctrl;
+ struct list_head resources;
struct pci_bus *bus;
- int next_busno = 0, i;
+ int next_busno = 0;
printk("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
pci_ctrl->last_busno = 0xff;
- bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
- pci_ctrl);
- if (pci_ctrl->io_resource.flags) {
- unsigned long offs;
-
- offs = (unsigned long)pci_ctrl->io_space.base;
- pci_ctrl->io_resource.start += offs;
- pci_ctrl->io_resource.end += offs;
- bus->resource[0] = &pci_ctrl->io_resource;
- }
- for (i = 0; i < 3; ++i)
- if (pci_ctrl->mem_resources[i].flags)
- bus->resource[i+1] =&pci_ctrl->mem_resources[i];
+ INIT_LIST_HEAD(&resources);
+ pci_controller_apertures(pci_ctrl, &resources);
+ bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
+ pci_ctrl->ops, pci_ctrl, &resources);
pci_ctrl->bus = bus;
- pci_ctrl->last_busno = bus->subordinate;
+ pci_ctrl->last_busno = bus->busn_res.end;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
@@ -171,69 +197,17 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-void __init pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
{
- struct pci_controller *pci_ctrl = bus->sysdata;
- struct resource *res;
- unsigned long io_offset;
- int i;
-
- io_offset = (unsigned long)pci_ctrl->io_space.base;
- if (bus->parent == NULL) {
- /* this is a host bridge - fill in its resources */
- pci_ctrl->bus = bus;
-
- bus->resource[0] = res = &pci_ctrl->io_resource;
- if (!res->flags) {
- if (io_offset)
- printk (KERN_ERR "I/O resource not set for host"
- " bridge %d\n", pci_ctrl->index);
- res->start = 0;
- res->end = IO_SPACE_LIMIT;
- res->flags = IORESOURCE_IO;
- }
- res->start += io_offset;
- res->end += io_offset;
-
- for (i = 0; i < 3; i++) {
- res = &pci_ctrl->mem_resources[i];
- if (!res->flags) {
- if (i > 0)
- continue;
- printk(KERN_ERR "Memory resource not set for "
- "host bridge %d\n", pci_ctrl->index);
- res->start = 0;
- res->end = ~0U;
- res->flags = IORESOURCE_MEM;
- }
- bus->resource[i+1] = res;
- }
- } else {
+ if (bus->parent) {
/* This is a subordinate bridge */
pci_read_bridge_bases(bus);
-
- for (i = 0; i < 4; i++) {
- if ((res = bus->resource[i]) == NULL || !res->flags)
- continue;
- if (io_offset && (res->flags & IORESOURCE_IO)) {
- res->start += io_offset;
- res->end += io_offset;
- }
- }
}
}
-char __init *pcibios_setup(char *str)
-{
- return str;
-}
-
-/* the next one is stolen from the alpha port... */
-
-void __init
-pcibios_update_irq(struct pci_dev *dev, int irq)
+void pcibios_set_master(struct pci_dev *dev)
{
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+ /* No special bus mastering setup handling */
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
@@ -359,7 +333,7 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
int prot = pgprot_val(vma->vm_page_prot);
/* Set to write-through */
- prot &= ~_PAGE_NO_CACHE;
+ prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
#if 0
if (!write_combine)
prot |= _PAGE_WRITETHRU;
@@ -394,72 +368,3 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
-
-/*
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horroble macroses here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 1;
- writeb(*(const char *)src, addr);
- src += 1;
- addr += 1;
- }
-}
-
-void outsw(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 2;
- writew(*(const short *)src, addr);
- src += 2;
- addr += 2;
- }
-}
-
-void outsl(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 4;
- writel(*(const long *)src, addr);
- src += 4;
- addr += 4;
- }
-}
-
-void insb(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 1;
- *(unsigned char *)dst = readb(addr);
- dst += 1;
- addr += 1;
- }
-}
-
-void insw(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 2;
- *(unsigned short *)dst = readw(addr);
- dst += 2;
- addr += 2;
- }
-}
-
-void insl(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 4;
- /*
- * XXX I am sure we are in for an unaligned trap here.
- */
- *(unsigned long *)dst = readl(addr);
- dst += 4;
- addr += 4;
- }
-}
-
-
-
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 69675f21606..1cf008284dd 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -29,21 +29,18 @@
*/
_F(void, setup, (char** cmd), { });
-_F(void, init_irq, (void), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
-_F(int, get_rtc_time, (time_t* t), { return 0; });
-_F(int, set_rtc_time, (time_t t), { return 0; });
+_F(void, pcibios_init, (void), { });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
{
- printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
- ccount_per_jiffy = 100 * (1000000UL/HZ);
+ pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
+ ccount_freq = 10 * 1000000UL;
});
#endif
-
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index a7c4178c2a8..1c85323f01d 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -1,4 +1,3 @@
-// TODO verify coprocessor handling
/*
* arch/xtensa/kernel/process.c
*
@@ -21,45 +20,32 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/asm-offsets.h>
-#include <asm/coprocessor.h>
+#include <asm/regs.h>
extern void ret_from_fork(void);
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
-
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
+extern void ret_from_kernel_thread(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
@@ -67,148 +53,227 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-#if XCHAL_CP_NUM > 0
+#if XTENSA_HAVE_COPROCESSORS
-/*
- * Coprocessor ownership.
- */
+void coprocessor_release_all(struct thread_info *ti)
+{
+ unsigned long cpenable;
+ int i;
-coprocessor_info_t coprocessor_info[] = {
- { 0, XTENSA_CPE_CP0_OFFSET },
- { 0, XTENSA_CPE_CP1_OFFSET },
- { 0, XTENSA_CPE_CP2_OFFSET },
- { 0, XTENSA_CPE_CP3_OFFSET },
- { 0, XTENSA_CPE_CP4_OFFSET },
- { 0, XTENSA_CPE_CP5_OFFSET },
- { 0, XTENSA_CPE_CP6_OFFSET },
- { 0, XTENSA_CPE_CP7_OFFSET },
-};
+ /* Make sure we don't switch tasks during this operation. */
+
+ preempt_disable();
+
+ /* Walk through all cp owners and release any owned by this thread. */
+
+ cpenable = ti->cpenable;
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti) {
+ coprocessor_owner[i] = 0;
+ cpenable &= ~(1 << i);
+ }
+ }
+
+ ti->cpenable = cpenable;
+ coprocessor_clear_cpenable();
+
+ preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ unsigned long cpenable;
+ int i;
+
+ preempt_disable();
+
+ cpenable = ti->cpenable;
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ coprocessor_flush(ti, i);
+ cpenable >>= 1;
+ }
+
+ preempt_enable();
+}
#endif
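
Both helpers rely on the same bookkeeping: a per-coprocessor owner pointer plus a per-thread cpenable bitmask. A userspace sketch of the release walk, assuming eight coprocessors (NCP, struct ti and release_all are illustrative names, not kernel symbols):

#include <stdio.h>

#define NCP 8

struct ti { unsigned long cpenable; };

static struct ti *owner[NCP];

static void release_all(struct ti *t)
{
        unsigned long cpenable = t->cpenable;
        int i;

        for (i = 0; i < NCP; i++) {
                if (owner[i] == t) {
                        owner[i] = 0;           /* drop ownership */
                        cpenable &= ~(1UL << i);
                }
        }
        t->cpenable = cpenable;
}

int main(void)
{
        struct ti t = { .cpenable = 0x5 };      /* owns CP0 and CP2 */

        owner[0] = owner[2] = &t;
        release_all(&t);
        printf("cpenable after release: %#lx\n", t.cpenable);  /* 0 */
        return 0;
}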
+
/*
* Powermanagement idle function, if any is provided by the platform.
*/
-
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- local_irq_enable();
-
- /* endless idle loop with no priority at all */
- while (1) {
- while (!need_resched())
- platform_idle();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+ platform_idle();
}
/*
- * Free current thread data structures etc..
+ * This is called when the thread calls exit().
*/
-
void exit_thread(void)
{
- release_coprocessors(current); /* Empty macro if no CPs are defined */
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(current_thread_info());
+#endif
}
+/*
+ * Flush thread state. This is called when a thread does an execve().
+ * Note that we flush the coprocessor registers in case execve() fails.
+ */
void flush_thread(void)
{
- release_coprocessors(current); /* Empty macro if no CPs are defined */
+#if XTENSA_HAVE_COPROCESSORS
+ struct thread_info *ti = current_thread_info();
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+#endif
+}
+
+/*
+ * This gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(task_thread_info(src));
+#endif
+ *dst = *src;
+ return 0;
}
/*
* Copy thread.
*
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
* The stack layout for the new thread looks like this:
*
- * +------------------------+ <- sp in childregs (= tos)
+ * +------------------------+
* | childregs |
* +------------------------+ <- thread.sp = sp in dummy-frame
* | dummy-frame | (saved in dummy-frame spill-area)
* +------------------------+
*
- * We create a dummy frame to return to ret_from_fork:
- * a0 points to ret_from_fork (simulating a call4)
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
* sp points to itself (thread.sp)
- * a2, a3 are unused.
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
*
* Note: This is a pristine frame, so we don't need any spill region on top of
* childregs.
+ *
+ * The fun part: if we're keeping the same VM (i.e. cloning a thread,
+ * not an entire process), we're normally given a new usp, and we CANNOT share
+ * any live address register windows. If we just copy those live frames over,
+ * the two threads (parent and child) will overflow the same frames onto the
+ * parent stack at different times, likely corrupting the parent stack (esp.
+ * if the parent returns from functions that called clone() and calls new
+ * ones, before the child overflows its now old copies of its parent windows).
+ * One solution is to spill windows to the parent stack, but that's fairly
+ * involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg, struct task_struct *p)
{
- struct pt_regs *childregs;
- unsigned long tos;
- int user_mode = user_mode(regs);
-
- /* Set up new TSS. */
- tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
- if (user_mode)
- childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
- else
- childregs = (struct pt_regs*)tos - 1;
+ struct pt_regs *childregs = task_pt_regs(p);
- *childregs = *regs;
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ struct thread_info *ti;
+#endif
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0;
- childregs->areg[1] = tos;
- childregs->areg[2] = 0;
- p->set_child_tid = p->clear_child_tid = NULL;
- p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
- if (user_mode(regs)) {
- int len = childregs->wmask & ~0xf;
+ if (!(p->flags & PF_KTHREAD)) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
+
+ /* This does not copy all the regs.
+ * In a bout of brilliance or madness,
+ * ARs beyond a0-a15 exist past the end of the struct.
+ */
+ *childregs = *regs;
childregs->areg[1] = usp;
- memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
- &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ usually starts on a pristine stack, so we have to reset
+ windowbase, windowstart and wmask.
+ (Note that such a new thread is required to always create
+ an initial call4 frame)
+ The exception is vfork, where the new thread continues to
+ run on the parent's stack until it calls execve. This could
+ be a call8 or call12, which requires a legal stack frame
+ of the previous caller for the overflow handlers to work.
+ (Note that it's always legal to overflow live registers).
+ In this case, ensure to spill at least the stack pointer
+ of that frame. */
+
+ if (clone_flags & CLONE_VM) {
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
+ } else {
+ int len = childregs->wmask & ~0xf;
+ memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+ &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+ }
+ /* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
- childregs->areg[2] = childregs->areg[6];
-
+ childregs->threadptr = childregs->areg[5];
} else {
- /* In kernel space, we start a new thread with a new stack. */
- childregs->wmask = 1;
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread:
+ * a2 = thread_fn, a3 = thread_fn arg
+ */
+ *((int *)childregs - 1) = thread_fn_arg;
+ *((int *)childregs - 2) = usp_thread_fn;
+
+ /* Childregs are only used when we're going to userspace,
+ * in which case start_thread will set them up.
+ */
}
- return 0;
-}
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ ti = task_thread_info(p);
+ ti->cpenable = 0;
+#endif
-/*
- * Create a kernel thread
- */
-
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
- long retval;
- __asm__ __volatile__
- ("mov a5, %4\n\t" /* preserve fn in a5 */
- "mov a6, %3\n\t" /* preserve and setup arg in a6 */
- "movi a2, %1\n\t" /* load __NR_clone for syscall*/
- "mov a3, sp\n\t" /* sp check and sys_clone */
- "mov a4, %5\n\t" /* load flags for syscall */
- "syscall\n\t"
- "beq a3, sp, 1f\n\t" /* branch if parent */
- "callx4 a5\n\t" /* call fn */
- "movi a2, %2\n\t" /* load __NR_exit for syscall */
- "mov a3, a6\n\t" /* load fn return value */
- "syscall\n"
- "1:\n\t"
- "mov %0, a2\n\t" /* parent returns zero */
- :"=r" (retval)
- :"i" (__NR_clone), "i" (__NR_exit),
- "r" (arg), "r" (fn),
- "r" (flags | CLONE_VM)
- : "a2", "a3", "a4", "a5", "a6" );
- return retval;
+ return 0;
}
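
The CLONE_VM branch above decodes the caller's call increment from the top two bits of the saved return address in a0. A small sketch of that decoding, per the windowed ABI (values 1/2/3 correspond to call4/call8/call12):

#include <stdio.h>

static int callinc(unsigned long a0)
{
        /* bits 31:30: how many 4-register quads WINDOWBASE advanced */
        return (a0 >> 30) & 3;
}

int main(void)
{
        unsigned long ra = 0x80001234UL;        /* bits 31:30 = 2 -> call8 */

        printf("caller advanced %d quads\n", callinc(ra));
        return 0;
}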
@@ -245,10 +310,6 @@ unsigned long get_wchan(struct task_struct *p)
}
/*
- * do_copy_regs() gathers information from 'struct pt_regs' and
- * 'current->thread.areg[]' to fill in the xtensa_gregset_t
- * structure.
- *
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
* of processor registers. Besides different ordering,
* xtensa_gregset_t contains non-live register information that
@@ -257,229 +318,39 @@ unsigned long get_wchan(struct task_struct *p)
*
*/
-void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
- struct task_struct *tsk)
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
- int i, n, wb_offset;
+ unsigned long wb, ws, wm;
+ int live, last;
- elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
- elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
+ wb = regs->windowbase;
+ ws = regs->windowstart;
+ wm = regs->wmask;
+ ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
- __asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
- elfregs->cpux = i;
- __asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
- elfregs->cpuy = i;
+ /* Don't leak any random bits. */
+
+ memset(elfregs, 0, sizeof(*elfregs));
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
*/
elfregs->pc = regs->pc;
- elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
- elfregs->exccause = regs->exccause;
- elfregs->excvaddr = regs->excvaddr;
- elfregs->windowbase = regs->windowbase;
- elfregs->windowstart = regs->windowstart;
+ elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
elfregs->lbeg = regs->lbeg;
elfregs->lend = regs->lend;
elfregs->lcount = regs->lcount;
elfregs->sar = regs->sar;
- elfregs->syscall = regs->syscall;
-
- /* Copy register file.
- * The layout looks like this:
- *
- * | a0 ... a15 | Z ... Z | arX ... arY |
- * current window unused saved frames
- */
-
- memset (elfregs->ar, 0, sizeof(elfregs->ar));
-
- wb_offset = regs->windowbase * 4;
- n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
-
- for (i = 0; i < n; i++)
- elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
+ elfregs->windowstart = ws;
- n = (regs->wmask >> 4) * 4;
-
- for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
- elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
+ live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+ last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+ memcpy(elfregs->a, regs->areg, live * 4);
+ memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}
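
The windowstart normalization above is a plain rotate: shifting WINDOWSTART right by WINDOWBASE (with wrap-around) puts the live frame's start bit at bit 0. A standalone sketch, assuming WSBITS = 16 as on cores with 64 address registers:

#include <stdio.h>

#define WSBITS 16

static unsigned long rotate_ws(unsigned long ws, unsigned long wb)
{
        return ((ws >> wb) | (ws << (WSBITS - wb))) & ((1UL << WSBITS) - 1);
}

int main(void)
{
        /* windowbase 1, start bits for frames 1 and 2 set */
        printf("%#lx\n", rotate_ws(0x6, 1));    /* 0x3: bit 0 = live frame */
        return 0;
}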
-void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
+int dump_fpu(void)
{
- do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
-}
-
-
-/* The inverse of do_copy_regs(). No error or sanity checking. */
-
-void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
- struct task_struct *tsk)
-{
- int i, n, wb_offset;
-
- /* Note: PS.EXCM is not set while user task is running; it
- * needs to be set in regs->ps is for exception handling convenience.
- */
-
- regs->pc = elfregs->pc;
- regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
- regs->exccause = elfregs->exccause;
- regs->excvaddr = elfregs->excvaddr;
- regs->windowbase = elfregs->windowbase;
- regs->windowstart = elfregs->windowstart;
- regs->lbeg = elfregs->lbeg;
- regs->lend = elfregs->lend;
- regs->lcount = elfregs->lcount;
- regs->sar = elfregs->sar;
- regs->syscall = elfregs->syscall;
-
- /* Clear everything. */
-
- memset (regs->areg, 0, sizeof(regs->areg));
-
- /* Copy regs from live window frame. */
-
- wb_offset = regs->windowbase * 4;
- n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
-
- for (i = 0; i < n; i++)
- regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
-
- n = (regs->wmask >> 4) * 4;
-
- for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
- regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
-}
-
-/*
- * do_save_fpregs() gathers information from 'struct pt_regs' and
- * 'current->thread' to fill in the elf_fpregset_t structure.
- *
- * Core files and ptrace use elf_fpregset_t.
- */
-
-void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
- struct task_struct *tsk)
-{
-#if XCHAL_HAVE_CP
-
- extern unsigned char _xtensa_reginfo_tables[];
- extern unsigned _xtensa_reginfo_table_size;
- int i;
- unsigned long flags;
-
- /* Before dumping coprocessor state from memory,
- * ensure any live coprocessor contents for this
- * task are first saved to memory:
- */
- local_irq_save(flags);
-
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (tsk == coprocessor_info[i].owner) {
- enable_coprocessor(i);
- save_coprocessor_registers(
- tsk->thread.cp_save+coprocessor_info[i].offset,i);
- disable_coprocessor(i);
- }
- }
-
- local_irq_restore(flags);
-
- /* Now dump coprocessor & extra state: */
- memcpy((unsigned char*)fpregs,
- _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
- memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
- tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
-#endif
-}
-
-/*
- * The inverse of do_save_fpregs().
- * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
- * Returns 0 on success, non-zero if layout doesn't match.
- */
-
-int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
- struct task_struct *tsk)
-{
-#if XCHAL_HAVE_CP
-
- extern unsigned char _xtensa_reginfo_tables[];
- extern unsigned _xtensa_reginfo_table_size;
- int i;
- unsigned long flags;
-
- /* Make sure save area layouts match.
- * FIXME: in the future we could allow restoring from
- * a different layout of the same registers, by comparing
- * fpregs' table with _xtensa_reginfo_tables and matching
- * entries and copying registers one at a time.
- * Not too sure yet whether that's very useful.
- */
-
- if( memcmp((unsigned char*)fpregs,
- _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
- return -1;
- }
-
- /* Before restoring coprocessor state from memory,
- * ensure any live coprocessor contents for this
- * task are first invalidated.
- */
-
- local_irq_save(flags);
-
- for (i = 0; i < XCHAL_CP_MAX; i++) {
- if (tsk == coprocessor_info[i].owner) {
- enable_coprocessor(i);
- save_coprocessor_registers(
- tsk->thread.cp_save+coprocessor_info[i].offset,i);
- coprocessor_info[i].owner = 0;
- disable_coprocessor(i);
- }
- }
-
- local_irq_restore(flags);
-
- /* Now restore coprocessor & extra state: */
-
- memcpy(tsk->thread.cp_save,
- (unsigned char*)fpregs + _xtensa_reginfo_table_size,
- XTENSA_CP_EXTRA_SIZE);
-#endif
return 0;
}
-/*
- * Fill in the CP structure for a core dump for a particular task.
- */
-
-int
-dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
-{
-/* see asm/coprocessor.h for this magic number 16 */
-#if XTENSA_CP_EXTRA_SIZE > 16
- do_save_fpregs (r, regs, task);
-
- /* For now, bit 16 means some extra state may be present: */
-// FIXME!! need to track to return more accurate mask
- return 0x10000 | XCHAL_CP_MASK;
-#else
- return 0; /* no coprocessors active on this processor */
-#endif
-}
-
-/*
- * Fill in the CP structure for a core dump.
- * This includes any FPU coprocessor.
- * Here, we dump all coprocessors, and other ("extra") custom state.
- *
- * This function is called by elf_core_dump() in fs/binfmt_elf.c
- * (in which case 'regs' comes from calls to do_coredump, see signals.c).
- */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
-{
- return dump_task_fpu(regs, current, r);
-}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9aea23cc0dc..562fac66475 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
@@ -19,24 +19,29 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/elf.h>
+#include <asm/coprocessor.h>
-#define TEST_KERNEL // verify kernel operations FIXME: remove
+void user_enable_single_step(struct task_struct *child)
+{
+ child->ptrace |= PT_SINGLESTEP;
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ child->ptrace &= ~PT_SINGLESTEP;
+}
/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
*/
void ptrace_disable(struct task_struct *child)
@@ -44,300 +49,275 @@ void ptrace_disable(struct task_struct *child)
/* Nothing to do.. */
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
- int ret = -EPERM;
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t __user *gregset = uregs;
+ unsigned long wb = regs->windowbase;
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ __put_user(regs->pc, &gregset->pc);
+ __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+ __put_user(regs->lbeg, &gregset->lbeg);
+ __put_user(regs->lend, &gregset->lend);
+ __put_user(regs->lcount, &gregset->lcount);
+ __put_user(regs->windowstart, &gregset->windowstart);
+ __put_user(regs->windowbase, &gregset->windowbase);
+ __put_user(regs->threadptr, &gregset->threadptr);
+
+ for (i = 0; i < XCHAL_NUM_AREGS; i++)
+ __put_user(regs->areg[i],
+ gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
+
+ return 0;
+}
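
The modulo expression maps the kernel's window-relative areg layout back to the fixed physical a0..a63 order a debugger expects: kernel slot i corresponds to physical register windowbase * 4 + i, wrapped at the register-file size. A one-line sketch (NAREGS is an assumed 64-register configuration):

#include <stdio.h>

#define NAREGS 64       /* assumed XCHAL_NUM_AREGS */

static int user_slot(int wb, int i)
{
        return (wb * 4 + i) % NAREGS;
}

int main(void)
{
        /* with windowbase 15, kernel areg[8] lands in user-visible slot 4 */
        printf("slot %d\n", user_slot(15, 8));
        return 0;
}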
- switch (request) {
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- {
- unsigned long tmp;
- int copied;
+int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t *gregset = uregs;
+ const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+ unsigned long ps;
+ unsigned long wb, ws;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ __get_user(regs->pc, &gregset->pc);
+ __get_user(ps, &gregset->ps);
+ __get_user(regs->lbeg, &gregset->lbeg);
+ __get_user(regs->lend, &gregset->lend);
+ __get_user(regs->lcount, &gregset->lcount);
+ __get_user(ws, &gregset->windowstart);
+ __get_user(wb, &gregset->windowbase);
+ __get_user(regs->threadptr, &gregset->threadptr);
+
+ regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+ if (wb >= XCHAL_NUM_AREGS / 4)
+ return -EFAULT;
+
+ if (wb != regs->windowbase || ws != regs->windowstart) {
+ unsigned long rotws, wmask;
+
+ rotws = (((ws | (ws << WSBITS)) >> wb) &
+ ((1 << WSBITS) - 1)) & ~1;
+ wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+ (rotws & 0xF) | 1;
+ regs->windowbase = wb;
+ regs->windowstart = ws;
+ regs->wmask = wmask;
+ }
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long *) data);
+ if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+ gregset->a, wb * 16))
+ return -EFAULT;
- goto out;
- }
+ if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+ (WSBITS - wb) * 16))
+ return -EFAULT;
- /* Read the word at location addr in the USER area. */
+ return 0;
+}
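
When windowbase or windowstart change, the cached wmask must be rebuilt. A userspace sketch of the reconstruction above, using the same formula (WSBITS = 16 assumed; ffs() is the POSIX find-first-set):

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define WSBITS 16

static unsigned long make_wmask(unsigned long ws, unsigned long wb)
{
        unsigned long rotws;

        /* rotate WINDOWSTART right by WINDOWBASE, drop the live frame's bit */
        rotws = (((ws | (ws << WSBITS)) >> wb) & ((1UL << WSBITS) - 1)) & ~1UL;
        /* upper bits: quads from the oldest caller frame to the top;
           low nibble: the next three start bits, with bit 0 forced on */
        return ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
               (rotws & 0xF) | 1;
}

int main(void)
{
        printf("single live frame: wmask = %#lx\n", make_wmask(1UL << 3, 3));
        return 0;
}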
- case PTRACE_PEEKUSR:
- {
- struct pt_regs *regs;
- unsigned long tmp;
- regs = task_pt_regs(child);
- tmp = 0; /* Default return value. */
+int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ struct thread_info *ti = task_thread_info(child);
+ elf_xtregs_t __user *xtregs = uregs;
+ int ret = 0;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+ return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessor registers to memory. */
+ coprocessor_flush_all(ti);
+ ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+ sizeof(xtregs_coprocessor_t));
+#endif
+ ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_to_user(&xtregs->user, &ti->xtregs_user,
+ sizeof(xtregs->user));
+
+ return ret ? -EFAULT : 0;
+}
+
+int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+ struct thread_info *ti = task_thread_info(child);
+ struct pt_regs *regs = task_pt_regs(child);
+ elf_xtregs_t *xtregs = uregs;
+ int ret = 0;
+
+ if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+ return -EFAULT;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessors before we overwrite them. */
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+
+ ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+ sizeof(xtregs_coprocessor_t));
+#endif
+ ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+ sizeof(xtregs->user));
+
+ return ret ? -EFAULT : 0;
+}
+
+int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
+{
+ struct pt_regs *regs;
+ unsigned long tmp;
+
+ regs = task_pt_regs(child);
+ tmp = 0; /* Default return value. */
- switch(addr) {
+ switch(regno) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- {
- int ar = addr - REG_AR_BASE - regs->windowbase * 4;
- ar &= (XCHAL_NUM_AREGS - 1);
- if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
- tmp = regs->areg[ar];
- else
- ret = -EIO;
+ tmp = regs->areg[regno - REG_AR_BASE];
break;
- }
+
case REG_A_BASE ... REG_A_BASE + 15:
- tmp = regs->areg[addr - REG_A_BASE];
+ tmp = regs->areg[regno - REG_A_BASE];
break;
+
case REG_PC:
tmp = regs->pc;
break;
+
case REG_PS:
/* Note: PS.EXCM is not set while user task is running;
* its being set in regs is for exception handling
* convenience. */
- tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
+ tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
break;
+
case REG_WB:
- tmp = regs->windowbase;
- break;
+ break; /* tmp = 0 */
+
case REG_WS:
- tmp = regs->windowstart;
+ {
+ unsigned long wb = regs->windowbase;
+ unsigned long ws = regs->windowstart;
+ tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
break;
+ }
case REG_LBEG:
tmp = regs->lbeg;
break;
+
case REG_LEND:
tmp = regs->lend;
break;
+
case REG_LCOUNT:
tmp = regs->lcount;
break;
+
case REG_SAR:
tmp = regs->sar;
break;
- case REG_DEPC:
- tmp = regs->depc;
- break;
- case REG_EXCCAUSE:
- tmp = regs->exccause;
- break;
- case REG_EXCVADDR:
- tmp = regs->excvaddr;
- break;
+
case SYSCALL_NR:
tmp = regs->syscall;
break;
- default:
- tmp = 0;
- ret = -EIO;
- goto out;
- }
- ret = put_user(tmp, (unsigned long *) data);
- goto out;
- }
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
- goto out;
+ default:
+ return -EIO;
+ }
+ return put_user(tmp, ret);
+}
- case PTRACE_POKEUSR:
- {
- struct pt_regs *regs;
- regs = task_pt_regs(child);
+int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+ struct pt_regs *regs;
+ regs = task_pt_regs(child);
- switch (addr) {
+ switch (regno) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- {
- int ar = addr - REG_AR_BASE - regs->windowbase * 4;
- if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
- regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
- else
- ret = -EIO;
+ regs->areg[regno - REG_AR_BASE] = val;
break;
- }
+
case REG_A_BASE ... REG_A_BASE + 15:
- regs->areg[addr - REG_A_BASE] = data;
+ regs->areg[regno - REG_A_BASE] = val;
break;
+
case REG_PC:
- regs->pc = data;
+ regs->pc = val;
break;
+
case SYSCALL_NR:
- regs->syscall = data;
- break;
-#ifdef TEST_KERNEL
- case REG_WB:
- regs->windowbase = data;
- break;
- case REG_WS:
- regs->windowstart = data;
+ regs->syscall = val;
break;
-#endif
default:
- /* The rest are not allowed. */
- ret = -EIO;
- break;
- }
- break;
- }
-
- /* continue and stop at next (return from) syscall */
- case PTRACE_SYSCALL:
- case PTRACE_CONT: /* restart after signal. */
- {
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- /* Make sure the single step bit is not set. */
- child->ptrace &= ~PT_SINGLESTEP;
- wake_up_process(child);
- ret = 0;
- break;
+ return -EIO;
}
+ return 0;
+}
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- child->ptrace &= ~PT_SINGLESTEP;
- wake_up_process(child);
- break;
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EPERM;
+ void __user *datap = (void __user *) data;
- case PTRACE_SINGLESTEP:
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->ptrace |= PT_SINGLESTEP;
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- case PTRACE_GETREGS:
- {
- /* 'data' points to user memory in which to write.
- * Mainly due to the non-live register values, we
- * reformat the register values into something more
- * standard. For convenience, we use the handy
- * elf_gregset_t format. */
-
- xtensa_gregset_t format;
- struct pt_regs *regs = task_pt_regs(child);
-
- do_copy_regs (&format, regs, child);
-
- /* Now, copy to user space nice and easy... */
- ret = 0;
- if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
- ret = -EFAULT;
+ case PTRACE_PEEKUSR: /* read register specified by addr. */
+ ret = ptrace_peekusr(child, addr, datap);
break;
- }
-
- case PTRACE_SETREGS:
- {
- /* 'data' points to user memory that contains the new
- * values in the elf_gregset_t format. */
-
- xtensa_gregset_t format;
- struct pt_regs *regs = task_pt_regs(child);
-
- if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
- ret = -EFAULT;
- break;
- }
-
- /* FIXME: Perhaps we want some sanity checks on
- * these user-space values? See ARM version. Are
- * debuggers a security concern? */
-
- do_restore_regs (&format, regs, child);
- ret = 0;
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
- }
-
- case PTRACE_GETFPREGS:
- {
- /* 'data' points to user memory in which to write.
- * For convenience, we use the handy
- * elf_fpregset_t format. */
-
- elf_fpregset_t fpregs;
- struct pt_regs *regs = task_pt_regs(child);
-
- do_save_fpregs (&fpregs, regs, child);
-
- /* Now, copy to user space nice and easy... */
- ret = 0;
- if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
- ret = -EFAULT;
+ case PTRACE_POKEUSR: /* write register specified by addr. */
+ ret = ptrace_pokeusr(child, addr, data);
break;
- }
- case PTRACE_SETFPREGS:
- {
- /* 'data' points to user memory that contains the new
- * values in the elf_fpregset_t format.
- */
- elf_fpregset_t fpregs;
- struct pt_regs *regs = task_pt_regs(child);
-
- ret = 0;
- if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
- ret = -EFAULT;
- break;
- }
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, datap);
+ break;
- if (do_restore_fpregs (&fpregs, regs, child))
- ret = -EIO;
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, datap);
break;
- }
- case PTRACE_GETFPREGSIZE:
- /* 'data' points to 'unsigned long' set to the size
- * of elf_fpregset_t
- */
- ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
+ case PTRACE_GETXTREGS:
+ ret = ptrace_getxregs(child, datap);
break;
- case PTRACE_DETACH: /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
+ case PTRACE_SETXTREGS:
+ ret = ptrace_setxregs(child, datap);
break;
default:
ret = ptrace_request(child, request, addr, data);
- goto out;
+ break;
}
- out:
+
return ret;
}
void do_syscall_trace(void)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
-
- if (!(current->ptrace & PT_PTRACED))
- return;
-
/*
* The 0x80 provides a way for the tracing parent to distinguish
* between a syscall stop and SIGTRAP delivery
@@ -354,3 +334,21 @@ void do_syscall_trace(void)
current->exit_code = 0;
}
}
+
+void do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE)
+ && (current->ptrace & PT_PTRACED))
+ do_syscall_trace();
+
+#if 0
+ audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
+#endif
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ if ((test_thread_flag(TIF_SYSCALL_TRACE))
+ && (current->ptrace & PT_PTRACED))
+ do_syscall_trace();
+}
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
deleted file mode 100644
index d40f4b1b75a..00000000000
--- a/arch/xtensa/kernel/semaphore.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * arch/xtensa/kernel/semaphore.c
- *
- * Generic semaphore code. Buyer beware. Do your own specific changes
- * in <asm/semaphore-helper.h>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
- * Chris Zankel <chris@zankel.net>
- * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
- * Kevin Chea
- */
-
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-spinlock_t semaphore_wake_lock;
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_one_more(sem);
- wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible. This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return. If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
- struct task_struct *tsk = current; \
- wait_queue_t wait; \
- init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
- \
- \
- tsk->state = (task_state); \
- add_wait_queue(&sem->wait, &wait); \
- \
- /* \
- * Ok, we're set up. sem->count is known to be less than zero \
- * so we must wait. \
- * \
- * We can let go the lock for purposes of waiting. \
- * We re-acquire it after awaking so as to protect \
- * all semaphore operations. \
- * \
- * If "up()" is called before we call waking_non_zero() then \
- * we will catch it right away. If it is called later then \
- * we will have to go through a wakeup cycle to catch it. \
- * \
- * Multiple waiters contend for the semaphore lock to see \
- * who gets to gate through and who has to wait some more. \
- */ \
- for (;;) {
-
-#define DOWN_TAIL(task_state) \
- tsk->state = (task_state); \
- } \
- tsk->state = TASK_RUNNING; \
- remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
- DOWN_VAR
- DOWN_HEAD(TASK_UNINTERRUPTIBLE)
- if (waking_non_zero(sem))
- break;
- schedule();
- DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
- DOWN_VAR
- DOWN_HEAD(TASK_INTERRUPTIBLE)
-
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret)
- {
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
- break;
- }
- schedule();
- DOWN_TAIL(TASK_INTERRUPTIBLE)
- return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
- return waking_non_zero_trylock(sem);
-}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 82684d05910..06370ccea9e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -1,5 +1,5 @@
/*
- * arch/xtensa/setup.c
+ * arch/xtensa/kernel/setup.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -16,10 +16,16 @@
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
@@ -33,16 +39,20 @@
# include <linux/seq_file.h>
#endif
-#include <asm/system.h>
#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/platform.h>
#include <asm/page.h>
#include <asm/setup.h>
+#include <asm/param.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/sysmem.h>
-#include <xtensa/config/system.h>
+#include <platform/hardware.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
@@ -53,47 +63,31 @@ extern struct fd_ops no_fd_ops;
struct fd_ops *fd_ops;
#endif
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
-extern struct ide_ops no_ide_ops;
-struct ide_ops *ide_ops;
-#endif
-
extern struct rtc_ops no_rtc_ops;
struct rtc_ops *rtc_ops;
-#ifdef CONFIG_PC_KEYB
-extern struct kbd_ops no_kbd_ops;
-struct kbd_ops *kbd_ops;
-#endif
-
#ifdef CONFIG_BLK_DEV_INITRD
-extern void *initrd_start;
-extern void *initrd_end;
-extern void *__initrd_start;
-extern void *__initrd_end;
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
int initrd_is_mapped = 0;
extern int initrd_below_start_ok;
#endif
+#ifdef CONFIG_OF
+void *dtb_start = __dtb_start;
+#endif
+
unsigned char aux_device_present;
extern unsigned long loops_per_jiffy;
/* Command line specified as configuration option. */
-static char command_line[COMMAND_LINE_SIZE];
+static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
-sysmem_info_t __initdata sysmem;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-int initrd_is_mapped;
-#endif
-
-extern void init_mmu(void);
-
/*
* Boot parameter parsing.
*
@@ -109,30 +103,18 @@ typedef struct tagtable {
} tagtable_t;
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
- __attribute__((unused, __section__(".taglist"))) = { tag, fn }
+ __attribute__((used, section(".taglist"))) = { tag, fn }
/* parse current tag */
static int __init parse_tag_mem(const bp_tag_t *tag)
{
- meminfo_t *mi = (meminfo_t*)(tag->data);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
- printk(KERN_WARNING
- "Ignoring memory bank 0x%08lx size %ldKB\n",
- (unsigned long)mi->start,
- (unsigned long)mi->end - (unsigned long)mi->start);
- return -EINVAL;
- }
- sysmem.bank[sysmem.nr_banks].type = mi->type;
- sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
- sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE;
- sysmem.nr_banks++;
-
- return 0;
+ return add_sysmem_bank(mi->start, mi->end);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -141,22 +123,33 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
static int __init parse_tag_initrd(const bp_tag_t* tag)
{
- meminfo_t* mi;
- mi = (meminfo_t*)(tag->data);
- initrd_start = (void*)(mi->start);
- initrd_end = (void*)(mi->end);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
return 0;
}
__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+#ifdef CONFIG_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+ dtb_start = __va(tag->data[0]);
+ return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+#endif /* CONFIG_OF */
+
#endif /* CONFIG_BLK_DEV_INITRD */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
- strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
- command_line[COMMAND_LINE_SIZE - 1] = '\0';
+ strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
return 0;
}
@@ -194,36 +187,108 @@ static int __init parse_bootparam(const bp_tag_t* tag)
return 0;
}
-/*
- * Initialize architecture. (Early stage)
- */
+#ifdef CONFIG_OF
+bool __initdata dt_memory_scan = false;
-void __init init_arch(bp_tag_t *bp_start)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
{
+ const __be32 *ranges;
+ int len;
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = &__initrd_start;
- initrd_end = &__initrd_end;
-#endif
+ if (depth > 1)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
- sysmem.nr_banks = 0;
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
-#ifdef CONFIG_CMDLINE_BOOL
- strcpy(command_line, default_command_line);
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
#endif
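
The mask applied to the parsed ranges entry aligns the KIO physical base down to a 256 MB boundary, matching the size of the KIO window mapped by the spanning way. A trivial sketch:

static unsigned long kio_base(unsigned long paddr)
{
        /* clear the low 28 bits, e.g. 0xfd001000 -> 0xf0000000 */
        return paddr & 0xf0000000UL;
}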
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ if (!dt_memory_scan)
+ return;
+
+ size &= PAGE_MASK;
+ add_sysmem_bank(base, base + size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __alloc_bootmem(size, align, 0);
+}
+
+void __init early_init_devtree(void *params)
+{
+ if (sysmem.nr_banks == 0)
+ dt_memory_scan = true;
+
+ early_init_dt_scan(params);
+ of_scan_flat_dt(xtensa_dt_io_area, NULL);
+
+ if (!command_line[0])
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+static int __init xtensa_device_probe(void)
+{
+ of_clk_init(NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+}
+
+device_initcall(xtensa_device_probe);
+
+#endif /* CONFIG_OF */
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
/* Parse boot parameters */
- if (bp_start)
- parse_bootparam(bp_start);
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+#ifdef CONFIG_OF
+ early_init_devtree(dtb_start);
+#endif
if (sysmem.nr_banks == 0) {
- sysmem.nr_banks = 1;
- sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
- sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
- + PLATFORM_DEFAULT_MEM_SIZE;
+ add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+ PLATFORM_DEFAULT_MEM_START +
+ PLATFORM_DEFAULT_MEM_SIZE);
}
+#ifdef CONFIG_CMDLINE_BOOL
+ if (!command_line[0])
+ strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
/* Early hook for platforms */
platform_init(bp_start);
@@ -249,14 +314,146 @@ extern char _UserExceptionVector_literal_start;
extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start;
extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
-void __init setup_arch(char **cmdline_p)
+
+
+#ifdef CONFIG_S32C1I_SELFTEST
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
{
- extern int mem_reserve(unsigned long, unsigned long, int);
- extern void bootmem_init(void);
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
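
For reference, the compare-and-swap contract stated in the comment (if *v == cmp, set *v = set, and return the previous *v) can be modeled with C11 atomics; this is a semantic reference only, not a substitute for probing the real S32C1I instruction:

#include <stdatomic.h>
#include <stdio.h>

static int compare_swap(_Atomic int *v, int cmp, int set)
{
        int expected = cmp;

        /* on failure, expected is overwritten with the current *v */
        atomic_compare_exchange_strong(v, &expected, set);
        return expected;        /* previous *v (== cmp iff the store happened) */
}

int main(void)
{
        _Atomic int word = 1;

        printf("%d\n", compare_swap(&word, 0, 2));      /* 1: no store */
        printf("%d\n", compare_swap(&word, 1, 2));      /* 1: stored 2 */
        return 0;
}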
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (soc bringup assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ we can get rid of this panic for single core (not SMP) */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
- memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+/* This condition should not occur with a commercially deployed processor.
+ Display a reminder for early engineering test or demo chips / FPGA bitstreams */
+static int __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+early_initcall(check_s32c1i);
+#endif /* CONFIG_S32C1I_SELFTEST */
+
+
+void __init setup_arch(char **cmdline_p)
+{
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
/* Reserve some memory regions */
@@ -264,9 +461,9 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
- __pa(initrd_end), 0);
+ __pa(initrd_end), 0) == 0;
initrd_below_start_ok = 1;
- } else {
+ } else {
initrd_start = 0;
}
#endif
@@ -288,12 +485,40 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
__pa(&_DoubleExceptionVector_text_end), 0);
+#if XCHAL_EXCM_LEVEL >= 2
+ mem_reserve(__pa(&_Level2InterruptVector_text_start),
+ __pa(&_Level2InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ mem_reserve(__pa(&_Level3InterruptVector_text_start),
+ __pa(&_Level3InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ mem_reserve(__pa(&_Level4InterruptVector_text_start),
+ __pa(&_Level4InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ mem_reserve(__pa(&_Level5InterruptVector_text_start),
+ __pa(&_Level5InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ mem_reserve(__pa(&_Level6InterruptVector_text_start),
+ __pa(&_Level6InterruptVector_text_end), 0);
+#endif
+
+ parse_early_param();
bootmem_init();
+ unflatten_and_copy_device_tree();
+
platform_setup(cmdline_p);
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
paging_init();
+ zones_init();
#ifdef CONFIG_VT
# if defined(CONFIG_VGA_CONSOLE)
@@ -308,6 +533,22 @@ void __init setup_arch(char **cmdline_p)
#endif
}
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data, i);
+ cpu->hotpluggable = !!i;
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
void machine_restart(char * cmd)
{
platform_restart();
@@ -333,21 +574,27 @@ void machine_power_off(void)
static int
c_show(struct seq_file *f, void *slot)
{
+ char buf[NR_CPUS * 5];
+
+ cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
/* high-level stuff */
- seq_printf(f,"processor\t: 0\n"
- "vendor_id\t: Tensilica\n"
- "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
- "core ID\t\t: " XCHAL_CORE_ID "\n"
- "build ID\t: 0x%x\n"
- "byte order\t: %s\n"
- "cpu MHz\t\t: %lu.%02lu\n"
- "bogomips\t: %lu.%02lu\n",
- XCHAL_BUILD_UNIQUE_ID,
- XCHAL_HAVE_BE ? "big" : "little",
- CCOUNT_PER_JIFFY/(1000000/HZ),
- (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
+ seq_printf(f, "CPU count\t: %u\n"
+ "CPU list\t: %s\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ num_online_cpus(),
+ buf,
+ XCHAL_BUILD_UNIQUE_ID,
+ XCHAL_HAVE_BE ? "big" : "little",
+ ccount_freq/1000000,
+ (ccount_freq/10000) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI
@@ -395,6 +642,9 @@ c_show(struct seq_file *f, void *slot)
#if XCHAL_HAVE_FP
"fpu "
#endif
+#if XCHAL_HAVE_S32C1I
+ "s32c1i "
+#endif
"\n");
/* Registers. */
@@ -420,32 +670,13 @@ c_show(struct seq_file *f, void *slot)
XCHAL_NUM_TIMERS,
XCHAL_DEBUGLEVEL);
- /* Coprocessors */
-#if XCHAL_HAVE_CP
- seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
-#else
- seq_printf(f, "coprocessors\t: none\n");
-#endif
-
- /* {I,D}{RAM,ROM} and XLMI */
- seq_printf(f,"inst ROMs\t: %d\n"
- "inst RAMs\t: %d\n"
- "data ROMs\t: %d\n"
- "data RAMs\t: %d\n"
- "XLMI ports\t: %d\n",
- XCHAL_NUM_IROM,
- XCHAL_NUM_IRAM,
- XCHAL_NUM_DROM,
- XCHAL_NUM_DRAM,
- XCHAL_NUM_XLMI);
-
/* Cache */
seq_printf(f,"icache line size: %d\n"
"icache ways\t: %d\n"
"icache size\t: %d\n"
"icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n"
"dcache line size: %d\n"
@@ -453,10 +684,10 @@ c_show(struct seq_file *f, void *slot)
"dcache size\t: %d\n"
"dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
- "writeback"
+ "writeback "
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n",
XCHAL_ICACHE_LINESIZE,
@@ -466,24 +697,6 @@ c_show(struct seq_file *f, void *slot)
XCHAL_DCACHE_WAYS,
XCHAL_DCACHE_SIZE);
- /* MMU */
- seq_printf(f,"ASID bits\t: %d\n"
- "ASID invalid\t: %d\n"
- "ASID kernel\t: %d\n"
- "rings\t\t: %d\n"
- "itlb ways\t: %d\n"
- "itlb AR ways\t: %d\n"
- "dtlb ways\t: %d\n"
- "dtlb AR ways\t: %d\n",
- XCHAL_MMU_ASID_BITS,
- XCHAL_MMU_ASID_INVALID,
- XCHAL_MMU_ASID_KERNEL,
- XCHAL_MMU_RINGS,
- XCHAL_ITLB_WAYS,
- XCHAL_ITLB_ARF_WAYS,
- XCHAL_DTLB_WAYS,
- XCHAL_DTLB_ARF_WAYS);
-
return 0;
}
@@ -493,7 +706,7 @@ c_show(struct seq_file *f, void *slot)
static void *
c_start(struct seq_file *f, loff_t *pos)
{
- return (void *) ((*pos == 0) ? (void *)1 : NULL);
+ return (*pos == 0) ? (void *)1 : NULL;
}
static void *
@@ -507,13 +720,12 @@ c_stop(struct seq_file *f, void *v)
{
}
-struct seq_operations cpuinfo_op =
+const struct seq_operations cpuinfo_op =
{
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: c_show
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
};
#endif /* CONFIG_PROC_FS */
-
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c494f0826fc..98b67d5f151 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -1,398 +1,256 @@
-// TODO coprocessor stuff
/*
- * linux/arch/xtensa/kernel/signal.c
+ * arch/xtensa/kernel/signal.c
*
- * Copyright (C) 1991, 1992 Linus Torvalds
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- *
- * Joe Taylor <joe@tensilica.com>
- * Chris Zankel <chris@zankel.net>
+ * Signal handling.
*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
+ * Copyright (C) 2005, 2006 Tensilica Inc.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
*/
-#include <xtensa/config/core.h>
-#include <xtensa/hal.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
-#include <linux/wait.h>
#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
#include <linux/personality.h>
+#include <linux/tracehook.h>
+
#include <asm/ucontext.h>
#include <asm/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/unistd.h>
#define DEBUG_SIG 0
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
- struct rusage * ru);
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
-
extern struct task_struct *coproc_owners[];
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t cp;
+#endif
+ } xtregs;
+ unsigned char retcode[6];
+ unsigned int window[4];
+};
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
+/*
+ * Flush register windows stored in pt_regs to the stack.
+ * Returns 1 for errors.
*/
-int sys_sigsuspend(struct pt_regs *regs)
+int
+flush_window_regs_user(struct pt_regs *regs)
{
- old_sigset_t mask = (old_sigset_t) regs->areg[3];
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->areg[2] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
+ const unsigned long ws = regs->windowstart;
+ const unsigned long wb = regs->windowbase;
+ unsigned long sp = 0;
+ unsigned long wm;
+ int err = 1;
+ int base;
-asmlinkage int
-sys_rt_sigsuspend(struct pt_regs *regs)
-{
- sigset_t *unewset = (sigset_t *) regs->areg[4];
- size_t sigsetsize = (size_t) regs->areg[3];
- sigset_t saveset, newset;
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->areg[2] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
+ /* Return if no other frames. */
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
+ if (regs->wmask == 1)
+ return 0;
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
+ /* Rotate windowmask and skip empty frames. */
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+ wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
+ base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
+
+ /* For call8 or call12 frames, we need the previous stack pointer. */
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
+ if ((regs->wmask & 2) == 0)
+ if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
+ goto errout;
- return ret;
-}
+ /* Spill frames to stack. */
-asmlinkage int
-sys_sigaltstack(struct pt_regs *regs)
-{
- const stack_t *uss = (stack_t *) regs->areg[4];
- stack_t *uoss = (stack_t *) regs->areg[3];
+ while (base < XCHAL_NUM_AREGS / 4) {
- if (regs->depc > 64)
- panic ("Double exception sys_sigreturn\n");
+ int m = (wm >> base);
+ int inc = 0;
+ /* Save registers a4..a7 (call8) or a4..a11 (call12) */
- return do_sigaltstack(uss, uoss, regs->areg[1]);
-}
+ if (m & 2) { /* call4 */
+ inc = 1;
+ } else if (m & 4) { /* call8 */
+ if (copy_to_user((void*)(sp - 32),
+ &regs->areg[(base + 1) * 4], 16))
+ goto errout;
+ inc = 2;
-/*
- * Do a signal return; undo the signal stack.
- */
+ } else if (m & 8) { /* call12 */
+ if (copy_to_user((void*)(sp - 48),
+ &regs->areg[(base + 1) * 4], 32))
+ goto errout;
+ inc = 3;
+ }
-struct sigframe
-{
- struct sigcontext sc;
- struct _cpstate cpstate;
- unsigned long extramask[_NSIG_WORDS-1];
- unsigned char retcode[6];
- unsigned int reserved[4]; /* Reserved area for chaining */
- unsigned int window[4]; /* Window of 4 registers for initial context */
-};
+ /* Save current frame a0..a3 under next SP */
-struct rt_sigframe
-{
- struct siginfo info;
- struct ucontext uc;
- struct _cpstate cpstate;
- unsigned char retcode[6];
- unsigned int reserved[4]; /* Reserved area for chaining */
- unsigned int window[4]; /* Window of 4 registers for initial context */
-};
+ sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
+ if (copy_to_user((void*)(sp - 16), &regs->areg[base * 4], 16))
+ goto errout;
-extern void release_all_cp (struct task_struct *);
+ /* Get current stack pointer for next loop iteration. */
+ sp = regs->areg[base * 4 + 1];
+ base += inc;
+ }
-// FIXME restore_cpextra
-static inline int
-restore_cpextra (struct _cpstate *buf)
-{
-#if 0
- /* The signal handler may have used coprocessors in which
- * case they are still enabled. We disable them to force a
- * reloading of the original task's CP state by the lazy
- * context-switching mechanisms of CP exception handling.
- * Also, we essentially discard any coprocessor state that the
- * signal handler created. */
+ regs->wmask = 1;
+ regs->windowstart = 1 << wb;
- struct task_struct *tsk = current;
- release_all_cp(tsk);
- return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE);
-#endif
return 0;
+
+errout:
+ return err;
}
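
The windowmask rotation above is easier to follow with concrete numbers. Here is a minimal user-space sketch of the same expression, assuming XCHAL_NUM_AREGS == 64 (16 window panes of 4 registers); the WINDOWSTART/WINDOWBASE values are invented for illustration:

    #include <stdio.h>

    #define NPANES 16 /* XCHAL_NUM_AREGS / 4, assumed */

    /* Same rotation as in flush_window_regs_user(): rotate WINDOWSTART
     * right by WINDOWBASE so that bit 0 marks the current frame. */
    static unsigned rotate_ws(unsigned ws, unsigned wb)
    {
        return ((ws >> wb) | (ws << (NPANES - wb))) & 0xffff;
    }

    int main(void)
    {
        /* Live frames in panes 0, 2 and 3 (current): WINDOWSTART = 0b1101. */
        printf("wm = 0x%x\n", rotate_ws(0xd, 3)); /* prints wm = 0xa001 */
        return 0;
    }

After the rotation, bit 0 of wm corresponds to the register window the exception was taken in.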
-/* Note: We don't copy double exception 'tregs', we have to finish double exc. first before we return to signal handler! This dbl.exc.handler might cause another double exception, but I think we are fine as the situation is the same as if we had returned to the signal handerl and got an interrupt immediately...
+/*
+ * Note: We don't copy double exception 'regs', we have to finish double exc.
+ * first before we return to signal handler! This dbl.exc.handler might cause
+ * another double exception, but I think we are fine as the situation is the
+ * same as if we had returned to the signal handler and got an interrupt
+ * immediately...
*/
-
static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
{
- struct thread_struct *thread;
- unsigned int err = 0;
- unsigned long ps;
- struct _cpstate *buf;
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ int err = 0;
-#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(pc);
- COPY(depc);
- COPY(wmask);
+ COPY(ps);
COPY(lbeg);
COPY(lend);
COPY(lcount);
COPY(sar);
- COPY(windowbase);
- COPY(windowstart);
#undef COPY
- /* For PS, restore only PS.CALLINC.
- * Assume that all other bits are either the same as for the signal
- * handler, or the user mode value doesn't matter (e.g. PS.OWB).
- */
- err |= __get_user(ps, &sc->sc_ps);
- regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
- | (ps & XCHAL_PS_CALLINC_MASK);
-
- /* Additional corruption checks */
-
- if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
- || ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
- err = 1;
- if ((regs->lcount > 0)
- && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
- err = 1;
-
- /* Restore extended register state.
- * See struct thread_struct in processor.h.
- */
- thread = &current->thread;
-
- err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
- err |= __get_user(buf, &sc->sc_cpstate);
- if (buf) {
- if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
- goto badframe;
- err |= restore_cpextra(buf);
- }
-
- regs->syscall = -1; /* disable syscall checks */
- return err;
-
-badframe:
- return 1;
-}
+ err |= flush_window_regs_user(regs);
+ err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
+ err |= __put_user(0, &sc->sc_xtregs);
-static inline void
-flush_my_cpstate(struct task_struct *tsk)
-{
- unsigned long flags;
- local_irq_save(flags);
-
-#if 0 // FIXME
- for (i = 0; i < XCHAL_CP_NUM; i++) {
- if (tsk == coproc_owners[i]) {
- xthal_validate_cp(i);
- xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);
-
- /* Invalidate and "disown" the cp to allow
- * callers the chance to reset cp state in the
- * task_struct. */
+ if (err)
+ return err;
- xthal_invalidate_cp(i);
- coproc_owners[i] = 0;
- }
- }
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+ err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+ sizeof (frame->xtregs.cp));
#endif
- local_irq_restore(flags);
-}
-
-/* Return codes:
- 0: nothing saved
- 1: stuff to save, successful
- -1: stuff to save, error happened
-*/
-static int
-save_cpextra (struct _cpstate *buf)
-{
-#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
- return 0;
-#else
+ err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+ sizeof (xtregs_opt_t));
+ err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+ sizeof (xtregs_user_t));
- /* FIXME: If a task has never used a coprocessor, there is
- * no need to save and restore anything. Tracking this
- * information would allow us to optimize this section.
- * Perhaps we can use current->used_math or (current->flags &
- * PF_USEDFPU) or define a new field in the thread
- * structure. */
-
- /* We flush any live, task-owned cp state to the task_struct,
- * then copy it all to the sigframe. Then we clear all
- * cp/extra state in the task_struct, effectively
- * clearing/resetting all cp/extra state for the signal
- * handler (cp-exception handling will load these new values
- * into the cp/extra registers.) This step is important for
- * things like a floating-point cp, where the OS must reset
- * the FCR to the default rounding mode. */
+ err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
- int err = 0;
- struct task_struct *tsk = current;
-
- flush_my_cpstate(tsk);
- /* Note that we just copy everything: 'extra' and 'cp' state together.*/
- err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
- memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);
-
-#if (XTENSA_CP_EXTRA_SIZE == 0)
-#error Sanity check on memset above, cpextra_size should not be zero.
-#endif
-
- return err ? -1 : 1;
-#endif
+ return err;
}
static int
-setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
- struct pt_regs *regs, unsigned long mask)
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{
- struct thread_struct *thread;
- int err = 0;
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ unsigned int err = 0;
+ unsigned long ps;
-//printk("setup_sigcontext\n");
-#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(pc);
- COPY(ps);
- COPY(depc);
- COPY(wmask);
COPY(lbeg);
COPY(lend);
COPY(lcount);
COPY(sar);
- COPY(windowbase);
- COPY(windowstart);
#undef COPY
- /* Save extended register state.
- * See struct thread_struct in processor.h.
- */
- thread = &current->thread;
- err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
- err |= save_cpextra(cpstate);
- err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
- /* non-iBCS2 extensions.. */
- err |= __put_user(mask, &sc->oldmask);
+ /* All registers were flushed to the stack. Start with a pristine frame. */
- return err;
-}
+ regs->wmask = 1;
+ regs->windowbase = 0;
+ regs->windowstart = 1;
-asmlinkage int sys_sigreturn(struct pt_regs *regs)
-{
- struct sigframe *frame = (struct sigframe *)regs->areg[1];
- sigset_t set;
- if (regs->depc > 64)
- panic ("Double exception sys_sigreturn\n");
+ regs->syscall = -1; /* disable syscall checks */
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
+ /* For PS, restore only PS.CALLINC.
+ * Assume that all other bits are either the same as for the signal
+ * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+ */
+ err |= __get_user(ps, &sc->sc_ps);
+ regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
+ /* Additional corruption checks */
- sigdelsetmask(&set, ~_BLOCKABLE);
+ if ((regs->lcount > 0)
+ && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
+ err = 1;
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
- if (restore_sigcontext(regs, &frame->sc))
- goto badframe;
- return regs->areg[2];
+ if (err)
+ return err;
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
+ /* The signal handler may have used coprocessors in which
+ * case they are still enabled. We disable them to force a
+ * reloading of the original task's CP state by the lazy
+ * context-switching mechanisms of CP exception handling.
+ * Also, we essentially discard any coprocessor state that the
+ * signal handler created. */
+
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(ti);
+ err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+ sizeof (frame->xtregs.cp));
+#endif
+ err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+ sizeof (xtregs_user_t));
+ err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+ sizeof (xtregs_opt_t));
+
+ return err;
}
-asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
+ long a4, long a5, struct pt_regs *regs)
{
- struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
+ struct rt_sigframe __user *frame;
sigset_t set;
- stack_t st;
int ret;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (regs->depc > 64)
- {
- printk("!!!!!!! DEPC !!!!!!!\n");
- return 0;
- }
+ panic("rt_sigreturn in double exception!\n");
+
+ frame = (struct rt_sigframe __user *) regs->areg[1];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -400,21 +258,15 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ if (restore_sigcontext(regs, frame))
goto badframe;
+
ret = regs->areg[2];
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack(&st, NULL, regs->areg[1]);
return ret;
@@ -423,177 +275,84 @@ badframe:
return 0;
}
-/*
- * Set up a signal frame.
- */
+
/*
- * Determine which stack to use..
+ * Set up a signal frame.
*/
-static inline void *
-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
-{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- return (void *)((sp - frame_size) & -16ul);
-}
-
-#define USE_SIGRETURN 0
-#define USE_RT_SIGRETURN 1
static int
-gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
+gen_return_code(unsigned char *codemem)
{
- unsigned int retcall;
int err = 0;
-#if 0
- /* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
- * and the xtensa glibc doesn't use it.
+ /*
+ * The 12-bit immediate is really split up within the 24-bit MOVI
+ * instruction. As long as the above system call numbers fit within
+ * 8-bits, the following code works fine. See the Xtensa ISA for
+ * details.
*/
- if (ka->sa.sa_flags & SA_RESTORER) {
- regs->pr = (unsigned long) ka->sa.sa_restorer;
- } else
-#endif /* 0 */
- {
-
-#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)
-
-/* The 12-bit immediate is really split up within the 24-bit MOVI
- * instruction. As long as the above system call numbers fit within
- * 8-bits, the following code works fine. See the Xtensa ISA for
- * details.
- */
-#error Generating the MOVI instruction below breaks!
+#if __NR_rt_sigreturn > 255
+# error Generating the MOVI instruction below breaks!
#endif
- retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;
-
#ifdef __XTENSA_EB__ /* Big Endian version */
- /* Generate instruction: MOVI a2, retcall */
- err |= __put_user(0x22, &codemem[0]);
- err |= __put_user(0x0a, &codemem[1]);
- err |= __put_user(retcall, &codemem[2]);
- /* Generate instruction: SYSCALL */
- err |= __put_user(0x00, &codemem[3]);
- err |= __put_user(0x05, &codemem[4]);
- err |= __put_user(0x00, &codemem[5]);
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0x0a, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x05, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
#elif defined __XTENSA_EL__ /* Little Endian version */
- /* Generate instruction: MOVI a2, retcall */
- err |= __put_user(0x22, &codemem[0]);
- err |= __put_user(0xa0, &codemem[1]);
- err |= __put_user(retcall, &codemem[2]);
- /* Generate instruction: SYSCALL */
- err |= __put_user(0x00, &codemem[3]);
- err |= __put_user(0x50, &codemem[4]);
- err |= __put_user(0x00, &codemem[5]);
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0xa0, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x50, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
#else
-#error Must use compiler for Xtensa processors.
+# error Must use compiler for Xtensa processors.
#endif
- }
/* Flush generated code out of the data cache */
- if (err == 0)
- __flush_invalidate_cache_range((unsigned long)codemem, 6UL);
+ if (err == 0) {
+ __invalidate_icache_range((unsigned long)codemem, 6UL);
+ __flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
+ }
return err;
}
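
As the comment above notes, only the low 8 bits of the syscall number fit in the third instruction byte, which the #error guards. As a hedged sketch (not part of the patch), a little-endian variant that also places the high four immediate bits might look like this; gen_movi_a2() and the exact nibble placement are my reading of the ISA, so treat them as assumptions:

    /* Hypothetical: emit MOVI a2, imm12 for values above 255 (LE only). */
    static int gen_movi_a2(unsigned char __user *codemem, unsigned int imm12)
    {
        int err = 0;

        err |= __put_user(0x22, &codemem[0]); /* t = a2, MOVI op0 */
        err |= __put_user(0xa0 | ((imm12 >> 8) & 0xf), &codemem[1]); /* imm12[11:8] */
        err |= __put_user(imm12 & 0xff, &codemem[2]); /* imm12[7:0] */
        return err;
    }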
-static void
-set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
- void *handler, unsigned long arg1, void *arg2, void *arg3)
-{
- /* Set up registers for signal handler */
- start_thread(regs, (unsigned long) handler, (unsigned long) stack);
-
- /* Set up a stack frame for a call4
- * Note: PS.CALLINC is set to one by start_thread
- */
- regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
- regs->areg[6] = arg1;
- regs->areg[7] = (unsigned long) arg2;
- regs->areg[8] = (unsigned long) arg3;
-}
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs *regs)
+static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
{
- struct sigframe *frame;
+ struct rt_sigframe *frame;
int err = 0;
int signal;
+ unsigned long sp, ra, tp;
- frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
- if (regs->depc > 64)
- {
- printk("!!!!!!! DEPC !!!!!!!\n");
- return;
- }
-
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
+ sp = regs->areg[1];
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask));
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
}
- /* Create sys_sigreturn syscall in stack frame */
- err |= gen_return_code(frame->retcode, USE_SIGRETURN);
-
- if (err)
- goto give_sigsegv;
-
- /* Create signal handler execution context.
- * Return context not modified until this point.
- */
- set_thread_state(regs, frame, frame->retcode,
- ka->sa.sa_handler, signal, &frame->sc, NULL);
-
- /* Set access mode to USER_DS. Nomenclature is outdated, but
- * functionality is used in uaccess.h
- */
- set_fs(USER_DS);
-
-
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
- current->comm, current->pid, signal, frame, regs->pc);
-#endif
-
- return;
-
-give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
-}
-
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
-{
- struct rt_sigframe *frame;
- int err = 0;
- int signal;
+ frame = (void *)((sp - sizeof(*frame)) & -16ul);
- frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
goto give_sigsegv;
+ }
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
@@ -601,31 +360,50 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
- err |= copy_siginfo_to_user(&frame->info, info);
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, info);
+ }
+
+ /* Create the user context. */
- /* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
- &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->areg[1]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate,
- regs, set->sig[0]);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]);
+ err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- /* Create sys_rt_sigreturn syscall in stack frame */
- err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ ra = (unsigned long)ka->sa.sa_restorer;
+ } else {
- if (err)
- goto give_sigsegv;
+ /* Create sys_rt_sigreturn syscall in stack frame */
+
+ err |= gen_return_code(frame->retcode);
- /* Create signal handler execution context.
+ if (err) {
+ goto give_sigsegv;
+ }
+ ra = (unsigned long) frame->retcode;
+ }
+
+ /*
+ * Create signal handler execution context.
* Return context not modified until this point.
*/
- set_thread_state(regs, frame, frame->retcode,
- ka->sa.sa_handler, signal, &frame->info, &frame->uc);
+
+ /* Set up registers for signal handler; preserve the threadptr */
+ tp = regs->threadptr;
+ start_thread(regs, (unsigned long) ka->sa.sa_handler,
+ (unsigned long) frame);
+
+ /* Set up a stack frame for a call4
+ * Note: PS.CALLINC is set to one by start_thread
+ */
+ regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
+ regs->areg[6] = (unsigned long) signal;
+ regs->areg[7] = (unsigned long) &frame->info;
+ regs->areg[8] = (unsigned long) &frame->uc;
+ regs->threadptr = tp;
/* Set access mode to USER_DS. Nomenclature is outdated, but
* functionality is used in uaccess.h
@@ -637,16 +415,13 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, signal, frame, regs->pc);
#endif
- return;
+ return 0;
give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
+ return -EFAULT;
}
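
The areg[4] line above is where the window increment is packed into the return address: in the windowed ABI, the top two bits of a return address hold the caller's call size (1 = call4, 2 = call8, 3 = call12) and only the low 30 bits carry the PC. A worked example with an invented trampoline address:

    ra      = 0x20001234                      /* hypothetical frame->retcode */
    areg[4] = (ra & 0x3fffffff) | 0x40000000  /* = 0x60001234, call4 encoding */

so the handler's RETW rotates the window back by four registers and resumes at the PC rebuilt from those low 30 bits.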
-
-
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -656,57 +431,92 @@ give_sigsegv:
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- if (!oldset)
- oldset = &current->blocked;
+ task_pt_regs(current)->icountlevel = 0;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- /* Are we from a system call? */
- if (regs->syscall >= 0) {
- /* If so, check system call restarting.. */
- switch (regs->areg[2]) {
- case ERESTARTNOHAND:
- case ERESTART_RESTARTBLOCK:
- regs->areg[2] = -EINTR;
- break;
+ if (signr > 0) {
+ int ret;
+
+ /* Are we from a system call? */
+
+ if ((signed)regs->syscall >= 0) {
- case ERESTARTSYS:
- if (!(ka.sa.sa_flags & SA_RESTART)) {
+ /* If so, check system call restarting.. */
+
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
regs->areg[2] = -EINTR;
break;
- }
- /* fallthrough */
- case ERESTARTNOINTR:
- regs->areg[2] = regs->syscall;
- regs->pc -= 3;
+
+ case -ERESTARTSYS:
+ if (!(ka.sa.sa_flags & SA_RESTART)) {
+ regs->areg[2] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+
+ default:
+ /* nothing to do */
+ if (regs->areg[2] != 0)
+ break;
+ }
}
+
+ /* Whee! Actually deliver the signal. */
+ /* Set up the stack frame */
+ ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
+ if (ret)
+ return;
+
+ signal_delivered(signr, &info, &ka, regs, 0);
+ if (current->ptrace & PT_SINGLESTEP)
+ task_pt_regs(current)->icountlevel = 1;
+
+ return;
}
- if (signr == 0)
- return 0; /* no signals delivered */
+ /* Did we come from a system call? */
+ if ((signed) regs->syscall >= 0) {
+ /* Restart the system call - no handlers present */
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = __NR_restart_syscall;
+ regs->pc -= 3;
+ break;
+ }
+ }
- /* Whee! Actually deliver the signal. */
+ /* If there's no signal to deliver, we just restore the saved mask. */
+ restore_saved_sigmask();
- /* Set up the stack frame */
- if (ka.sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(signr, &ka, &info, oldset, regs);
- else
- setup_frame(signr, &ka, oldset, regs);
+ if (current->ptrace & PT_SINGLESTEP)
+ task_pt_regs(current)->icountlevel = 1;
+ return;
+}
- if (ka.sa.sa_flags & SA_ONESHOT)
- ka.sa.sa_handler = SIG_DFL;
+void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING))
+ do_signal(regs);
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
- if (!(ka.sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- return 1;
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
}
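
The restart paths above rely on SYSCALL being a 3-byte instruction (the same three bytes gen_return_code() emits), so backing the saved PC up by 3 lands exactly on the trapping opcode. The -ERESTARTNOINTR case, restated as a sketch:

    /* on entry: pc points just past SYSCALL, areg[2] holds -ERESTARTNOINTR */
    regs->areg[2] = regs->syscall; /* put the syscall number back in a2 */
    regs->pc -= 3;                 /* step back onto the SYSCALL opcode */
    /* returning to user space now re-issues the same system call */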
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 00000000000..40b5a3771fb
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,607 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+# error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+
+/* IPI (Inter-Processor Interrupt) */
+
+#define IPI_IRQ 0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+static struct irqaction ipi_irqaction = {
+ .handler = ipi_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "ipi",
+};
+
+void ipi_init(void)
+{
+ unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+ setup_irq(irq, &ipi_irqaction);
+}
+
+static inline unsigned int get_core_count(void)
+{
+ /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+ unsigned int syscfgid = get_er(SYSCFGID);
+ return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+ /* Bits 0..13 of SYSCFGID contain the core id (mask 0x3fff) */
+ unsigned int core_id = get_er(SYSCFGID);
+ return core_id & 0x3fff;
+}
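
A worked decode of the two helpers above, using an invented SYSCFGID value:

    SYSCFGID   = 0x000c0002
    core count = ((0x000c0002 >> 18) & 0xf) + 1 = 3 + 1 = 4
    core id    = 0x000c0002 & 0x3fff            = 2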
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned i;
+
+ for (i = 0; i < max_cpus; ++i)
+ set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+ unsigned i;
+ unsigned int ncpus = get_core_count();
+ unsigned int core_id = get_core_id();
+
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+ BUG_ON(cpu != 0);
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ init_mmu();
+
+#ifdef CONFIG_DEBUG_KERNEL
+ if (boot_secondary_processors == 0) {
+ pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+ for (;;)
+ __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+ }
+
+ pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+#endif
+ /* Init EXCSAVE1 */
+
+ secondary_trap_init();
+
+ /* All kernel threads share the same mm context. */
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ enter_lazy_tlb(mm, current);
+
+ preempt_disable();
+ trace_hardirqs_off();
+
+ calibrate_delay();
+
+ notify_cpu_starting(cpu);
+
+ secondary_init_irq();
+ local_timer_setup(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ complete(&cpu_running);
+
+ cpu_startup_entry(CPUHP_ONLINE);
+}
+
+static void mx_cpu_start(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask | (1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned long ccount;
+ int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpu_start_id = cpu;
+ system_flush_invalidate_dcache_range(
+ (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
+ for (i = 0; i < 2; ++i) {
+ do
+ ccount = get_ccount();
+ while (!ccount);
+
+ cpu_start_ccount = ccount;
+
+ while (time_before(jiffies, timeout)) {
+ mb();
+ if (!cpu_start_ccount)
+ break;
+ }
+
+ if (cpu_start_ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+ (void *)cpu, 1);
+ cpu_start_ccount = 0;
+ return -EIO;
+ }
+ }
+ return 0;
+}
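
The handshake visible on the boot-CPU side above runs twice per secondary; annotated as a sketch:

    cpu_start_ccount = ccount;  /* publish a non-zero "go" token        */
    /* poll up to 1s, waiting for the secondary to clear the token      */
    /* still set after the timeout: stall the core again, return -EIO   */

The clearing side presumably lives in the secondary's early boot path (head.S/mxhead.S), which is outside this hunk.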
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret = 0;
+
+ if (cpu_asid_cache(cpu) == 0)
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+ start_info.stack = (unsigned long)task_pt_regs(idle);
+ wmb();
+
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ if (ret)
+ pr_err("CPU %u failed to boot\n", cpu);
+
+ return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ invalidate_page_directory();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+ smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+ if (cpu_start_id == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+ }
+ pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ idle_task_exit();
+ local_irq_disable();
+ __asm__ __volatile__(
+ " movi a2, cpu_restart\n"
+ " jx a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE = 0,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_MAX
+};
+
+static const struct {
+ const char *short_text;
+ const char *long_text;
+} ipi_text[] = {
+ { .short_text = "RES", .long_text = "Rescheduling interrupts" },
+ { .short_text = "CAL", .long_text = "Function call interrupts" },
+ { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+ unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void send_ipi_message(const struct cpumask *callmask,
+ enum ipi_msg_type msg_id)
+{
+ int index;
+ unsigned long mask = 0;
+
+ for_each_cpu(index, callmask)
+ if (index != smp_processor_id())
+ mask |= 1 << index;
+
+ set_er(mask, MIPISET(msg_id));
+}
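
A worked example of the mailbox protocol above, with invented CPU numbers:

    /* cpu 0 sends IPI_RESCHEDULE to cpus 1 and 2: */
    mask = (1 << 1) | (1 << 2);            /* = 0x6, sender excluded */
    set_er(mask, MIPISET(IPI_RESCHEDULE)); /* raise the IPI          */
    /* each target later acks its bit in ipi_interrupt() below:      */
    set_er(1 << IPI_RESCHEDULE, MIPICAUSE(cpu));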
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ set_cpu_online(cpu, false);
+ machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned int msg;
+ unsigned i;
+
+ msg = get_er(MIPICAUSE(cpu));
+ for (i = 0; i < IPI_MAX; i++)
+ if (msg & (1 << i)) {
+ set_er(1 << i, MIPICAUSE(cpu));
+ ++ipi->ipi_count[i];
+ }
+
+ if (msg & (1 << IPI_RESCHEDULE))
+ scheduler_ipi();
+ if (msg & (1 << IPI_CALL_FUNC))
+ generic_smp_call_function_interrupt();
+ if (msg & (1 << IPI_CPU_STOP))
+ ipi_cpu_stop(cpu);
+
+ return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+ unsigned i;
+
+ for (i = 0; i < IPI_MAX; ++i) {
+ seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu",
+ per_cpu(ipi_data, cpu).ipi_count[i]);
+ seq_printf(p, " %s\n", ipi_text[i].long_text);
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ pr_debug("setup_profiling_timer %d\n", multiplier);
+ return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+ local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = addr,
+ };
+ on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+ local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = address,
+ .addr2 = pfn,
+ };
+ on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 00000000000..7d2c317bd98
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,120 @@
+/*
+ * arch/xtensa/kernel/stacktrace.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long a0, a1;
+ unsigned long sp_end;
+
+ a1 = (unsigned long)sp;
+ sp_end = ALIGN(a1, THREAD_SIZE);
+
+ spill_registers();
+
+ while (a1 < sp_end) {
+ struct stackframe frame;
+
+ sp = (unsigned long *)a1;
+
+ a0 = *(sp - 4);
+ a1 = *(sp - 3);
+
+ if (a1 <= (unsigned long)sp)
+ break;
+
+ frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+ return;
+ }
+}
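
A minimal usage sketch for walk_stackframe() (not part of the patch): a callback that counts the current task's frames. Returning non-zero from the callback stops the walk, exactly as the callers below rely on.

    static int count_frame_cb(struct stackframe *frame, void *data)
    {
        ++*(int *)data;
        return 0; /* keep walking */
    }

    static int count_current_frames(void)
    {
        int n = 0;

        /* stack_pointer(NULL) yields the current stack pointer, as in
         * return_address() below. */
        walk_stackframe(stack_pointer(NULL), count_frame_cb, &n);
        return n;
    }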
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+ struct stack_trace_data *trace_data = data;
+ struct stack_trace *trace = trace_data->trace;
+
+ if (trace_data->skip) {
+ --trace_data->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+
+ trace->entries[trace->nr_entries++] = frame->pc;
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ struct stack_trace_data trace_data = {
+ .trace = trace,
+ .skip = trace->skip,
+ };
+ walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+ unsigned long addr;
+ unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+ struct return_addr_data *r = data;
+
+ if (r->skip) {
+ --r->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+ r->addr = frame->pc;
+ return 1;
+}
+
+unsigned long return_address(unsigned level)
+{
+ struct return_addr_data r = {
+ .skip = level + 1,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 00000000000..5d3f7a119ed
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,95 @@
+/*
+ * arch/xtensa/kernel/syscall.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+#include <asm/uaccess.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+
+typedef void (*syscall_t)(void);
+
+syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */ = {
+ [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
+
+#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
+#include <uapi/asm/unistd.h>
+};
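
To make the table construction concrete: the range initializer points every slot at sys_ni_syscall, and each __SYSCALL line in <uapi/asm/unistd.h> then overrides one slot. With an invented entry, the expansion is:

    __SYSCALL(8, sys_creat, 2)  /* line in unistd.h; the number is made up */
        => [8] = (syscall_t)sys_creat,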
+
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
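
COLOUR_ALIGN rounds an address up to an SHMLBA boundary, then adds the cache colour of the file offset so shared mappings of the same page land on the same colour. A worked example, assuming SHMLBA == 0x4000 and PAGE_SHIFT == 12:

    addr = 0x10001000, pgoff = 3
    (addr + 0x3fff) & ~0x3fff = 0x10004000 /* SHMLBA-align up */
    (pgoff << 12) & 0x3fff    = 0x00003000 /* offset's colour */
    COLOUR_ALIGN(addr, pgoff) = 0x10007000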
+
+asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
+{
+ unsigned long ret;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
+ if (err)
+ return err;
+ return (long)ret;
+}
+
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
+{
+ return sys_fadvise64_64(fd, offset, len, advice);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vmm;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vmm || addr + len <= vmm->vm_start)
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
deleted file mode 100644
index 4688ba2db84..00000000000
--- a/arch/xtensa/kernel/syscalls.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * arch/xtensa/kernel/syscall.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- * Copyright (C) 2000 Silicon Graphics, Inc.
- * Copyright (C) 1995 - 2000 by Ralf Baechle
- *
- * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
- * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
- * Chris Zankel <chris@zankel.net>
- * Kevin Chea
- *
- */
-
-#define DEBUG 0
-
-#include <linux/linkage.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-#include <linux/utsname.h>
-#include <linux/unistd.h>
-#include <linux/stringify.h>
-#include <linux/syscalls.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/signal.h>
-#include <asm/uaccess.h>
-#include <asm/hardirq.h>
-#include <asm/mman.h>
-#include <asm/shmparam.h>
-#include <asm/page.h>
-
-extern void do_syscall_trace(void);
-typedef int (*syscall_t)(void *a0,...);
-extern syscall_t sys_call_table[];
-extern unsigned char sys_narg_table[];
-
-/*
- * sys_pipe() is the normal C calling standard for creating a pipe. It's not
- * the way unix traditional does this, though.
- */
-
-int sys_pipe(int __user *userfds)
-{
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user(userfds, fd, 2 * sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-/*
- * Common code for old and new mmaps.
- */
-long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-int sys_clone(struct pt_regs *regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
- clone_flags = regs->areg[4];
- newsp = regs->areg[3];
- parent_tidptr = (int __user *)regs->areg[5];
- child_tidptr = (int __user *)regs->areg[6];
- if (!newsp)
- newsp = regs->areg[1];
- return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-
-int sys_execve(struct pt_regs *regs)
-{
- int error;
- char * filename;
-
- filename = getname((char *) (long)regs->areg[5]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, (char **) (long)regs->areg[3],
- (char **) (long)regs->areg[4], regs);
- putname(filename);
-
-out:
- return error;
-}
-
-int sys_uname(struct old_utsname * name)
-{
- if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
- return 0;
- return -EFAULT;
-}
-
-/*
- * Build the string table for the builtin "poor man's strace".
- */
-
-#if DEBUG
-#define SYSCALL(fun, narg) #fun,
-static char *sfnames[] = {
-#include "syscalls.h"
-};
-#undef SYS
-#endif
-
-void system_call (struct pt_regs *regs)
-{
- syscall_t syscall;
- unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
- int nargs, res;
- unsigned int syscallnr;
- int ps;
-
-#if DEBUG
- int i;
- unsigned long parms[6];
- char *sysname;
-#endif
-
- regs->syscall = regs->areg[2];
-
- do_syscall_trace();
-
- /* Have to load after syscall_trace because strace
- * sometimes changes regs->syscall.
- */
- syscallnr = regs->syscall;
-
- parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;
-
- /* Restore interrupt level to syscall invoker's.
- * If this were in assembly, we wouldn't disable
- * interrupts in the first place:
- */
- local_save_flags (ps);
- local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
- (regs->ps & XCHAL_PS_INTLEVEL_MASK) );
-
- if (syscallnr > __NR_Linux_syscalls) {
- regs->areg[2] = -ENOSYS;
- return;
- }
-
- syscall = sys_call_table[syscallnr];
- nargs = sys_narg_table[syscallnr];
-
- if (syscall == NULL) {
- regs->areg[2] = -ENOSYS;
- return;
- }
-
- /* There shouldn't be more than six arguments in the table! */
-
- if (nargs > 6)
- panic("Internal error - too many syscall arguments (%d)!\n",
- nargs);
-
- /* Linux takes system-call arguments in registers. The ABI
- * and Xtensa software conventions require the system-call
- * number in a2. If an argument exists in a2, we move it to
- * the next available register. Note that for improved
- * efficiency, we do NOT shift all parameters down one
- * register to maintain the original order.
- *
- * At best case (zero arguments), we just write the syscall
- * number to a2. At worst case (1 to 6 arguments), we move
- * the argument in a2 to the next available register, then
- * write the syscall number to a2.
- *
- * For clarity, the following truth table enumerates all
- * possibilities.
- *
- * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
- * --------- -------------- ----------------------------------
- * 0 a2
- * 1 a2 a3
- * 2 a2 a4, a3
- * 3 a2 a5, a3, a4
- * 4 a2 a6, a3, a4, a5
- * 5 a2 a7, a3, a4, a5, a6
- * 6 a2 a8, a3, a4, a5, a6, a7
- */
- if (nargs) {
- parm0 = regs->areg[nargs+2];
- parm1 = regs->areg[3];
- parm2 = regs->areg[4];
- parm3 = regs->areg[5];
- parm4 = regs->areg[6];
- parm5 = regs->areg[7];
- } else /* nargs == 0 */
- parm0 = (unsigned long) regs;
-
-#if DEBUG
- parms[0] = parm0;
- parms[1] = parm1;
- parms[2] = parm2;
- parms[3] = parm3;
- parms[4] = parm4;
- parms[5] = parm5;
-
- sysname = sfnames[syscallnr];
- if (strncmp(sysname, "sys_", 4) == 0)
- sysname = sysname + 4;
-
- printk("\017SYSCALL:I:%x:%d:%s %s(", regs->pc, current->pid,
- current->comm, sysname);
- for (i = 0; i < nargs; i++)
- printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
- printk(")\n");
-#endif
-
- res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);
-
-#if DEBUG
- printk("\017SYSCALL:O:%d:%s %s(",current->pid, current->comm, sysname);
- for (i = 0; i < nargs; i++)
- printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
- if (res < 4096)
- printk(") = %d\n", res);
- else
- printk(") = %#x\n", res);
-#endif /* DEBUG */
-
- regs->areg[2] = res;
- do_syscall_trace();
-}
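
The truth table above is the heart of the removed dispatcher. A minimal C sketch of the same argument demultiplexing, kept self-contained for clarity (struct regs_sketch and demux_args are hypothetical stand-ins for struct pt_regs and the inline code in system_call()):

/* Sketch only: mirrors the argument shuffle of the removed system_call().
 * The syscall number arrives in a2, so a call taking nargs arguments has
 * its first argument displaced to a(nargs+2); arguments 1..5 stay put in
 * a3..a7, matching the truth table above. */
struct regs_sketch {
	unsigned long areg[16];	/* hypothetical stand-in for pt_regs */
};

static void demux_args(struct regs_sketch *regs, int nargs,
		       unsigned long parm[6])
{
	if (nargs) {
		parm[0] = regs->areg[nargs + 2];	/* displaced arg0 */
		parm[1] = regs->areg[3];
		parm[2] = regs->areg[4];
		parm[3] = regs->areg[5];
		parm[4] = regs->areg[6];
		parm[5] = regs->areg[7];
	} else {
		/* zero-argument entries receive the register frame */
		parm[0] = (unsigned long)regs;
	}
}
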
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
deleted file mode 100644
index 216c10a3150..00000000000
--- a/arch/xtensa/kernel/syscalls.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * arch/xtensa/kernel/syscalls.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Changes by Joe Taylor <joe@tensilica.com>
- */
-
-/*
- * This file is being included twice - once to build a list of all
- * syscalls and once to build a table of how many arguments each syscall
- * accepts. Syscalls that receive a pointer to the saved registers are
- * marked as having zero arguments.
- *
- * The binary compatibility calls are in a separate list.
- *
- * Entry '0' used to be system_call. It's removed to disable indirect
- * system calls for now so user tasks can't recurse. See mips'
- * sys_syscall for a comparable example.
- */
-
-SYSCALL(0, 0) /* 00 */
-SYSCALL(sys_exit, 1)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_read, 3)
-SYSCALL(sys_write, 3)
-SYSCALL(sys_open, 3) /* 05 */
-SYSCALL(sys_close, 1)
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_creat, 2)
-SYSCALL(sys_link, 2)
-SYSCALL(sys_unlink, 1) /* 10 */
-SYSCALL(sys_execve, 0)
-SYSCALL(sys_chdir, 1)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_mknod, 3)
-SYSCALL(sys_chmod, 2) /* 15 */
-SYSCALL(sys_lchown, 3)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_newstat, 2)
-SYSCALL(sys_lseek, 3)
-SYSCALL(sys_getpid, 0) /* 20 */
-SYSCALL(sys_mount, 5)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_setuid, 1)
-SYSCALL(sys_getuid, 0)
-SYSCALL(sys_ni_syscall, 1) /* 25 */
-SYSCALL(sys_ptrace, 4)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_newfstat, 2)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_utime, 2) /* 30 */
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_access, 2)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_ni_syscall, 0) /* 35 */
-SYSCALL(sys_sync, 0)
-SYSCALL(sys_kill, 2)
-SYSCALL(sys_rename, 2)
-SYSCALL(sys_mkdir, 2)
-SYSCALL(sys_rmdir, 1) /* 40 */
-SYSCALL(sys_dup, 1)
-SYSCALL(sys_pipe, 1)
-SYSCALL(sys_times, 1)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_brk, 1) /* 45 */
-SYSCALL(sys_setgid, 1)
-SYSCALL(sys_getgid, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_geteuid, 0)
-SYSCALL(sys_getegid, 0) /* 50 */
-SYSCALL(sys_acct, 1)
-SYSCALL(sys_umount, 2)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ioctl, 3)
-SYSCALL(sys_fcntl, 3) /* 55 */
-SYSCALL(sys_ni_syscall, 2)
-SYSCALL(sys_setpgid, 2)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_umask, 1) /* 60 */
-SYSCALL(sys_chroot, 1)
-SYSCALL(sys_ustat, 2)
-SYSCALL(sys_dup2, 2)
-SYSCALL(sys_getppid, 0)
-SYSCALL(sys_ni_syscall, 0) /* 65 */
-SYSCALL(sys_setsid, 0)
-SYSCALL(sys_sigaction, 3)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_setreuid, 2) /* 70 */
-SYSCALL(sys_setregid, 2)
-SYSCALL(sys_sigsuspend, 0)
-SYSCALL(sys_ni_syscall, 1)
-SYSCALL(sys_sethostname, 2)
-SYSCALL(sys_setrlimit, 2) /* 75 */
-SYSCALL(sys_getrlimit, 2)
-SYSCALL(sys_getrusage, 2)
-SYSCALL(sys_gettimeofday, 2)
-SYSCALL(sys_settimeofday, 2)
-SYSCALL(sys_getgroups, 2) /* 80 */
-SYSCALL(sys_setgroups, 2)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_symlink, 2)
-SYSCALL(sys_newlstat, 2)
-SYSCALL(sys_readlink, 3) /* 85 */
-SYSCALL(sys_uselib, 1)
-SYSCALL(sys_swapon, 2)
-SYSCALL(sys_reboot, 3)
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_ni_syscall, 6) /* 90 */
-SYSCALL(sys_munmap, 2)
-SYSCALL(sys_truncate, 2)
-SYSCALL(sys_ftruncate, 2)
-SYSCALL(sys_fchmod, 2)
-SYSCALL(sys_fchown, 3) /* 95 */
-SYSCALL(sys_getpriority, 2)
-SYSCALL(sys_setpriority, 3)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_statfs, 2)
-SYSCALL(sys_fstatfs, 2) /* 100 */
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_ni_syscall, 2)
-SYSCALL(sys_syslog, 3)
-SYSCALL(sys_setitimer, 3)
-SYSCALL(sys_getitimer, 2) /* 105 */
-SYSCALL(sys_newstat, 2)
-SYSCALL(sys_newlstat, 2)
-SYSCALL(sys_newfstat, 2)
-SYSCALL(sys_uname, 1)
-SYSCALL(sys_ni_syscall, 0) /* 110 */
-SYSCALL(sys_vhangup, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_wait4, 4)
-SYSCALL(sys_swapoff, 1) /* 115 */
-SYSCALL(sys_sysinfo, 1)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_fsync, 1)
-SYSCALL(sys_sigreturn, 0)
-SYSCALL(sys_clone, 0) /* 120 */
-SYSCALL(sys_setdomainname, 2)
-SYSCALL(sys_newuname, 1)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_adjtimex, 1)
-SYSCALL(sys_mprotect, 3) /* 125 */
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_ni_syscall, 2)
-SYSCALL(sys_init_module, 2)
-SYSCALL(sys_delete_module, 1)
-SYSCALL(sys_ni_syscall, 1) /* 130 */
-SYSCALL(sys_quotactl, 0)
-SYSCALL(sys_getpgid, 1)
-SYSCALL(sys_fchdir, 1)
-SYSCALL(sys_bdflush, 2)
-SYSCALL(sys_sysfs, 3) /* 135 */
-SYSCALL(sys_personality, 1)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_setfsuid, 1)
-SYSCALL(sys_setfsgid, 1)
-SYSCALL(sys_llseek, 5) /* 140 */
-SYSCALL(sys_getdents, 3)
-SYSCALL(sys_select, 5)
-SYSCALL(sys_flock, 2)
-SYSCALL(sys_msync, 3)
-SYSCALL(sys_readv, 3) /* 145 */
-SYSCALL(sys_writev, 3)
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_ni_syscall, 3)
-SYSCALL(sys_ni_syscall, 4) /* handled in fast syscall handler. */
-SYSCALL(sys_ni_syscall, 0) /* 150 */
-SYSCALL(sys_getsid, 1)
-SYSCALL(sys_fdatasync, 1)
-SYSCALL(sys_sysctl, 1)
-SYSCALL(sys_mlock, 2)
-SYSCALL(sys_munlock, 2) /* 155 */
-SYSCALL(sys_mlockall, 1)
-SYSCALL(sys_munlockall, 0)
-SYSCALL(sys_sched_setparam,2)
-SYSCALL(sys_sched_getparam,2)
-SYSCALL(sys_sched_setscheduler,3) /* 160 */
-SYSCALL(sys_sched_getscheduler,1)
-SYSCALL(sys_sched_yield,0)
-SYSCALL(sys_sched_get_priority_max,1)
-SYSCALL(sys_sched_get_priority_min,1)
-SYSCALL(sys_sched_rr_get_interval,2) /* 165 */
-SYSCALL(sys_nanosleep,2)
-SYSCALL(sys_mremap,4)
-SYSCALL(sys_accept, 3)
-SYSCALL(sys_bind, 3)
-SYSCALL(sys_connect, 3) /* 170 */
-SYSCALL(sys_getpeername, 3)
-SYSCALL(sys_getsockname, 3)
-SYSCALL(sys_getsockopt, 5)
-SYSCALL(sys_listen, 2)
-SYSCALL(sys_recv, 4) /* 175 */
-SYSCALL(sys_recvfrom, 6)
-SYSCALL(sys_recvmsg, 3)
-SYSCALL(sys_send, 4)
-SYSCALL(sys_sendmsg, 3)
-SYSCALL(sys_sendto, 6) /* 180 */
-SYSCALL(sys_setsockopt, 5)
-SYSCALL(sys_shutdown, 2)
-SYSCALL(sys_socket, 3)
-SYSCALL(sys_socketpair, 4)
-SYSCALL(sys_setresuid, 3) /* 185 */
-SYSCALL(sys_getresuid, 3)
-SYSCALL(sys_ni_syscall, 5)
-SYSCALL(sys_poll, 3)
-SYSCALL(sys_nfsservctl, 3)
-SYSCALL(sys_setresgid, 3) /* 190 */
-SYSCALL(sys_getresgid, 3)
-SYSCALL(sys_prctl, 5)
-SYSCALL(sys_rt_sigreturn, 0)
-SYSCALL(sys_rt_sigaction, 4)
-SYSCALL(sys_rt_sigprocmask, 4) /* 195 */
-SYSCALL(sys_rt_sigpending, 2)
-SYSCALL(sys_rt_sigtimedwait, 4)
-SYSCALL(sys_rt_sigqueueinfo, 3)
-SYSCALL(sys_rt_sigsuspend, 0)
-SYSCALL(sys_pread64, 5) /* 200 */
-SYSCALL(sys_pwrite64, 5)
-SYSCALL(sys_chown, 3)
-SYSCALL(sys_getcwd, 2)
-SYSCALL(sys_capget, 2)
-SYSCALL(sys_capset, 2) /* 205 */
-SYSCALL(sys_sigaltstack, 0)
-SYSCALL(sys_sendfile, 4)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_ni_syscall, 0)
-SYSCALL(sys_mmap, 6) /* 210 */
-SYSCALL(sys_truncate64, 2)
-SYSCALL(sys_ftruncate64, 2)
-SYSCALL(sys_stat64, 2)
-SYSCALL(sys_lstat64, 2)
-SYSCALL(sys_fstat64, 2) /* 215 */
-SYSCALL(sys_pivot_root, 2)
-SYSCALL(sys_mincore, 3)
-SYSCALL(sys_madvise, 3)
-SYSCALL(sys_getdents64, 3)
-SYSCALL(sys_ni_syscall, 0) /* 220 */
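
The header comment above describes a classic X-macro: the file is included twice with two different SYSCALL() definitions. A minimal sketch of that pattern, assuming a hypothetical list file demo_syscalls.h in place of the deleted syscalls.h:

/* Sketch of the double-inclusion (X-macro) pattern described above:
 * one list expands into both a handler table and an argument-count
 * table, so the two cannot drift apart.  demo_syscalls.h is a
 * hypothetical file holding SYSCALL(...) lines like the ones above. */
typedef long (*syscall_t)(void);

#define SYSCALL(fun, narg) (syscall_t)(fun),
static syscall_t demo_call_table[] = {
#include "demo_syscalls.h"
};
#undef SYSCALL

#define SYSCALL(fun, narg) (narg),
static unsigned char demo_narg_table[] = {
#include "demo_syscalls.h"
};
#undef SYSCALL
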
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 412ab32de39..2a1823de69c 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -13,208 +13,169 @@
*/
#include <linux/errno.h>
+#include <linux/sched.h>
#include <linux/time.h>
-#include <linux/timex.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/sched_clock.h>
#include <asm/timex.h>
#include <asm/platform.h>
+unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
-extern volatile unsigned long wall_jiffies;
+static cycle_t ccount_read(struct clocksource *cs)
+{
+ return (cycle_t)get_ccount();
+}
-DEFINE_SPINLOCK(rtc_lock);
-EXPORT_SYMBOL(rtc_lock);
+static u64 notrace ccount_sched_clock_read(void)
+{
+ return get_ccount();
+}
+static struct clocksource ccount_clocksource = {
+ .name = "ccount",
+ .rating = 200,
+ .read = ccount_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-unsigned long ccount_per_jiffy; /* per 1/HZ */
-unsigned long ccount_nsec; /* nsec per ccount increment */
-#endif
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev);
+static void ccount_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+struct ccount_timer {
+ struct clock_event_device evt;
+ int irq_enabled;
+ char name[24];
+};
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
-unsigned int last_ccount_stamp;
-static long last_rtc_update = 0;
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ unsigned long flags, next;
+ int ret = 0;
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
+ local_irq_save(flags);
+ next = get_ccount() + delta;
+ set_linux_timer(next);
+ if (next - get_ccount() > delta)
+ ret = -ETIME;
+ local_irq_restore(flags);
+
+ return ret;
+}
-unsigned long long sched_clock(void)
+static void ccount_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
- return (unsigned long long)jiffies * (1000000000 / HZ);
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ /*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+ switch (mode) {
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (timer->irq_enabled) {
+ disable_irq(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_ONESHOT:
+ if (!timer->irq_enabled) {
+ enable_irq(evt->irq);
+ timer->irq_enabled = 1;
+ }
+ default:
+ break;
+ }
}
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.name = "timer",
};
-void __init time_init(void)
+void local_timer_setup(unsigned cpu)
{
- time_t sec_o, sec_n = 0;
-
- /* The platform must provide a function to calibrate the processor
- * speed for the CALIBRATE.
- */
+ struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+ struct clock_event_device *clockevent = &timer->evt;
+
+ timer->irq_enabled = 1;
+ clockevent->name = timer->name;
+ snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+ clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->rating = 300;
+ clockevent->set_next_event = ccount_timer_set_next_event;
+ clockevent->set_mode = ccount_timer_set_mode;
+ clockevent->cpumask = cpumask_of(cpu);
+ clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ if (WARN(!clockevent->irq, "error: can't map timer irq"))
+ return;
+ clockevents_config_and_register(clockevent, ccount_freq,
+ 0xf, 0xffffffff);
+}
+void __init time_init(void)
+{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
- printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
- (int)(ccount_per_jiffy/(10000/HZ))%100);
+ printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
+ (int)(ccount_freq/10000)%100);
+#else
+ ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
-
- /* Set time from RTC (if provided) */
-
- if (platform_get_rtc_time(&sec_o) == 0)
- while (platform_get_rtc_time(&sec_n))
- if (sec_o != sec_n)
- break;
-
- xtime.tv_nsec = 0;
- last_rtc_update = xtime.tv_sec = sec_n;
- last_ccount_stamp = get_ccount();
-
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
- /* Initialize the linux timer interrupt. */
-
- setup_irq(LINUX_TIMER_INT, &timer_irqaction);
- set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
-}
-
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
- unsigned long ccount;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
-
- /* This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- ccount = get_ccount();
- nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
- nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long sec, usec, delta, lost, seq;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
-
- delta = get_ccount() - last_ccount_stamp;
- sec = xtime.tv_sec;
- usec = (xtime.tv_nsec / NSEC_PER_USEC);
-
- lost = jiffies - wall_jiffies;
-
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
- for (; usec >= 1000000; sec++, usec -= 1000000)
- ;
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
+ clocksource_register_hz(&ccount_clocksource, ccount_freq);
+ local_timer_setup(0);
+ setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+ sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+ clocksource_of_init();
}
-EXPORT_SYMBOL(do_gettimeofday);
-
/*
* The timer interrupt is called HZ times per second.
*/
-irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id)
{
+ struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
- unsigned long next;
-
- next = get_linux_timer();
-
-again:
- while ((signed long)(get_ccount() - next) > 0) {
-
- profile_tick(CPU_PROFILING, regs);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
-#endif
-
- write_seqlock(&xtime_lock);
-
- last_ccount_stamp = next;
- next += CCOUNT_PER_JIFFY;
- do_timer (regs); /* Linux handler in kernel/timer.c */
-
- if (ntp_synced() &&
- xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
- jiffies - wall_jiffies == 1) {
-
- if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
- last_rtc_update = xtime.tv_sec+1;
- else
- /* Do it again in 60 s */
- last_rtc_update += 60;
- }
- write_sequnlock(&xtime_lock);
- }
-
- /* NOTE: writing CCOMPAREn clears the interrupt. */
-
- set_linux_timer (next);
-
- /* Make sure we didn't miss any tick... */
-
- if ((signed long)(get_ccount() - next) > 0)
- goto again;
+ set_linux_timer(get_linux_timer());
+ evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
-
platform_heartbeat();
return IRQ_HANDLED;
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __devinit calibrate_delay(void)
+void calibrate_delay(void)
{
- loops_per_jiffy = CCOUNT_PER_JIFFY;
+ loops_per_jiffy = ccount_freq / HZ;
printk("Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n",
loops_per_jiffy/(1000000/HZ),
(loops_per_jiffy/(10000/HZ)) % 100);
}
#endif
-
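
ccount_timer_set_next_event() above shows the standard one-shot clockevent race check: CCOMPARE fires only on an exact counter match, so the counter must be re-read after programming it. A stripped-down sketch of just that check (get_ccount() and set_linux_timer() are the real timex helpers; the irq masking from the original is omitted):

/* Sketch: why set_next_event must re-read the counter.  If the
 * deadline slipped past before the CCOMPARE write landed, the match
 * was missed, and -ETIME asks the clockevents core to retry with a
 * larger delta. */
static int program_ccompare(unsigned long delta)
{
	unsigned long next = get_ccount() + delta;

	set_linux_timer(next);			/* write CCOMPARE */
	if (next - get_ccount() > delta)	/* already passed? */
		return -ETIME;
	return 0;
}
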
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index ce077d6bf3a..eebbfd8c26f 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -11,7 +11,7 @@
*
* Essentially rewritten for the Xtensa architecture port.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
@@ -30,12 +30,15 @@
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
+#include <asm/traps.h>
#ifdef CONFIG_KGDB
extern int gdb_enter;
@@ -75,7 +78,7 @@ extern void system_call (struct pt_regs*);
#define USER 0x02
#define COPROCESSOR(x) \
-{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
typedef struct {
int cause;
@@ -83,63 +86,65 @@ typedef struct {
void* handler;
} dispatch_init_table_t;
-dispatch_init_table_t __init dispatch_init_table[] = {
-
-{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
-{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
-{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
-{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call },
-/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
-/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
-{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
-{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
-/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
-/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
+static dispatch_init_table_t __initdata dispatch_init_table[] = {
+
+{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
+{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
+{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
+{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
+/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
+{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
+{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
+/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-#ifdef CONFIG_UNALIGNED_USER
-{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned },
+#ifdef CONFIG_XTENSA_UNALIGNED_USER
+{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#else
-{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
+{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
#endif
-{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
+{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
-{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault },
-{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
-{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
-{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
-/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
-{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
-{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
-{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault },
-{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
-{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
-/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
-{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
-{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
-{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
+#ifdef CONFIG_MMU
+{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
+/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
+/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
+#endif /* CONFIG_MMU */
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
-#if (XCHAL_CP_MASK & 1)
+#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
-#if (XCHAL_CP_MASK & 2)
+#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
-#if (XCHAL_CP_MASK & 4)
+#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
-#if (XCHAL_CP_MASK & 8)
+#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
-#if (XCHAL_CP_MASK & 16)
+#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
-#if (XCHAL_CP_MASK & 32)
+#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
-#if (XCHAL_CP_MASK & 64)
+#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
-#if (XCHAL_CP_MASK & 128)
+#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
@@ -152,7 +157,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers.
*/
-unsigned long exc_table[EXC_TABLE_SIZE/4];
+DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
void die(const char*, struct pt_regs*, long);
@@ -176,7 +181,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
printk("Caught unhandled exception in '%s' "
"(pid = %d, pc = %#010lx) - should not happen\n"
"\tEXCCAUSE is %ld\n",
- current->comm, current->pid, regs->pc, exccause);
+ current->comm, task_pid_nr(current), regs->pc, exccause);
force_sig(SIGILL, current);
}
@@ -190,30 +195,48 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
}
/*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
*/
-unsigned long ignored_level1_interrupts;
extern void do_IRQ(int, struct pt_regs *);
-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
{
- unsigned long intread = get_sr (INTREAD);
- unsigned long intenable = get_sr (INTENABLE);
- int i, mask;
+ static const unsigned int_level_mask[] = {
+ 0,
+ XCHAL_INTLEVEL1_MASK,
+ XCHAL_INTLEVEL2_MASK,
+ XCHAL_INTLEVEL3_MASK,
+ XCHAL_INTLEVEL4_MASK,
+ XCHAL_INTLEVEL5_MASK,
+ XCHAL_INTLEVEL6_MASK,
+ XCHAL_INTLEVEL7_MASK,
+ };
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ for (;;) {
+ unsigned intread = get_sr(interrupt);
+ unsigned intenable = get_sr(intenable);
+ unsigned int_at_level = intread & intenable;
+ unsigned level;
+
+ for (level = LOCKLEVEL; level > 0; --level) {
+ if (int_at_level & int_level_mask[level]) {
+ int_at_level &= int_level_mask[level];
+ break;
+ }
+ }
- /* Handle all interrupts (no priorities).
- * (Clear the interrupt before processing, in case it's
- * edge-triggered or software-generated)
- */
+ if (level == 0)
+ break;
- for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
- if (mask & (intread & intenable)) {
- set_sr (mask, INTCLEAR);
- do_IRQ (i,regs);
- }
+ do_IRQ(__ffs(int_at_level), regs);
}
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
/*
@@ -228,7 +251,7 @@ do_illegal_instruction(struct pt_regs *regs)
/* If in user mode, send SIGILL signal to current process. */
printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
- current->comm, current->pid, regs->pc);
+ current->comm, task_pid_nr(current), regs->pc);
force_sig(SIGILL, current);
}
@@ -241,7 +264,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-#ifndef CONFIG_UNALIGNED_USER
+#ifndef CONFIG_XTENSA_UNALIGNED_USER
void
do_unaligned_user (struct pt_regs *regs)
{
@@ -254,7 +277,7 @@ do_unaligned_user (struct pt_regs *regs)
current->thread.error_code = -3;
printk("Unaligned memory access to %08lx in '%s' "
"(pid = %d, pc = %#010lx)\n",
- regs->excvaddr, current->comm, current->pid, regs->pc);
+ regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
@@ -290,6 +313,31 @@ do_debug(struct pt_regs *regs)
}
+static void set_handler(int idx, void *handler)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
+}
+
+/* Set exception C handler - for temporary use when probing exceptions */
+
+void * __init trap_set_handler(int cause, void *handler)
+{
+ void *previous = (void *)per_cpu(exc_table, 0)[
+ EXC_TABLE_DEFAULT / 4 + cause];
+ set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
+ return previous;
+}
+
+
+static void trap_init_excsave(void)
+{
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+}
+
/*
* Initialize dispatch tables.
*
@@ -303,9 +351,7 @@ do_debug(struct pt_regs *regs)
* See vectors.S for more details.
*/
-#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
-
-void trap_init(void)
+void __init trap_init(void)
{
int i;
@@ -334,10 +380,15 @@ void trap_init(void)
}
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
+ trap_init_excsave();
+}
- i = (unsigned long)exc_table;
- __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+ trap_init_excsave();
}
+#endif
/*
* This function dumps the current valid window frame and other base registers.
@@ -347,16 +398,16 @@ void show_regs(struct pt_regs * regs)
{
int i, wmask;
+ show_regs_print_info(KERN_DEFAULT);
+
wmask = regs->wmask & ~1;
- for (i = 0; i < 32; i++) {
- if (wmask & (1 << (i / 4)))
- break;
+ for (i = 0; i < 16; i++) {
if ((i % 8) == 0)
- printk ("\n" KERN_INFO "a%02d: ", i);
- printk("%08lx ", regs->areg[i]);
+ printk(KERN_INFO "a%02d:", i);
+ printk(KERN_CONT " %08lx", regs->areg[i]);
}
- printk("\n");
+ printk(KERN_CONT "\n");
printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
regs->pc, regs->ps, regs->depc, regs->excvaddr);
@@ -368,42 +419,25 @@ void show_regs(struct pt_regs * regs)
regs->syscall);
}
-void show_trace(struct task_struct *task, unsigned long *sp)
+static int show_trace_cb(struct stackframe *frame, void *data)
{
- unsigned long a0, a1, pc;
- unsigned long sp_start, sp_end;
-
- a1 = (unsigned long)sp;
-
- if (a1 == 0)
- __asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));
-
+ if (kernel_text_address(frame->pc)) {
+ printk(" [<%08lx>] ", frame->pc);
+ print_symbol("%s\n", frame->pc);
+ }
+ return 0;
+}
- sp_start = a1 & ~(THREAD_SIZE-1);
- sp_end = sp_start + THREAD_SIZE;
+void show_trace(struct task_struct *task, unsigned long *sp)
+{
+ if (!sp)
+ sp = stack_pointer(task);
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
- spill_registers();
-
- while (a1 > sp_start && a1 < sp_end) {
- sp = (unsigned long*)a1;
-
- a0 = *(sp - 4);
- a1 = *(sp - 3);
-
- if (a1 <= (unsigned long) sp)
- break;
-
- pc = MAKE_PC_FROM_RA(a0, a1);
-
- if (kernel_text_address(pc)) {
- printk(" [<%08lx>] ", pc);
- print_symbol("%s\n", pc);
- }
- }
+ walk_stackframe(sp, show_trace_cb, NULL);
printk("\n");
}
@@ -419,10 +453,9 @@ void show_stack(struct task_struct *task, unsigned long *sp)
int i = 0;
unsigned long *stack;
- if (sp == 0)
- __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
-
- stack = sp;
+ if (!sp)
+ sp = stack_pointer(task);
+ stack = sp;
printk("\nStack: ");
@@ -437,14 +470,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, stack);
}
-void dump_stack(void)
-{
- show_stack(current, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-
void show_code(unsigned int *pc)
{
long i;
@@ -482,6 +507,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
@@ -492,5 +518,3 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(err);
}
-
-
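
The rewritten do_interrupt() above services interrupts strictly from the highest pending level down, instead of scanning all interrupt numbers flat. A compact sketch of the selection step (level_mask[] plays the role of the XCHAL_INTLEVELn_MASK table, and __ffs() is the kernel's find-first-set helper):

/* Sketch: choose the highest pending+enabled interrupt level, then
 * the lowest IRQ number within that level -- the same policy as the
 * rewritten do_interrupt() above. */
static int pick_irq(unsigned int pending, unsigned int enabled,
		    const unsigned int level_mask[], int max_level)
{
	unsigned int live = pending & enabled;
	int level;

	for (level = max_level; level > 0; --level)
		if (live & level_mask[level])
			return __ffs(live & level_mask[level]);

	return -1;	/* nothing left: the dispatch loop terminates */
}
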
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 0e74397bfa2..8453e6e3989 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -10,7 +10,7 @@
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -44,14 +44,15 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
-#include <asm/processor.h>
+#include <asm/vectors.h>
+
+#define WINDOW_VECTORS_SIZE 0x180
/*
@@ -69,16 +70,19 @@
ENTRY(_UserExceptionVector)
- xsr a3, EXCSAVE_1 # save a3 and get dispatch table
- wsr a2, DEPC # save a2
+ xsr a3, excsave1 # save a3 and get dispatch table
+ wsr a2, depc # save a2
l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
s32i a0, a2, PT_AREG0 # save a0 to ESF
- rsr a0, EXCCAUSE # retrieve exception cause
+ rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ xsr a3, excsave1 # restore a3 and dispatch table
jx a0
+ENDPROC(_UserExceptionVector)
+
/*
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
*
@@ -93,16 +97,18 @@ ENTRY(_UserExceptionVector)
ENTRY(_KernelExceptionVector)
- xsr a3, EXCSAVE_1 # save a3, and get dispatch table
- wsr a2, DEPC # save a2
+ xsr a3, excsave1 # save a3, and get dispatch table
+ wsr a2, depc # save a2
addi a2, a1, -16-PT_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF
- rsr a0, EXCCAUSE # retrieve exception cause
+ rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
+ xsr a3, excsave1 # restore a3 and dispatch table
jx a0
+ENDPROC(_KernelExceptionVector)
/*
* Double exception vector (Exceptions with PS.EXCM == 1)
@@ -164,7 +170,7 @@ ENTRY(_KernelExceptionVector)
*
* a0: DEPC
* a1: a1
- * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable
* depc: a0
* excsave_1: a3
@@ -200,41 +206,46 @@ ENTRY(_KernelExceptionVector)
.section .DoubleExceptionVector.text, "ax"
.begin literal_prefix .DoubleExceptionVector
+ .globl _DoubleExceptionVector_WindowUnderflow
+ .globl _DoubleExceptionVector_WindowOverflow
ENTRY(_DoubleExceptionVector)
- /* Deliberately destroy excsave (don't assume its value was valid). */
-
- wsr a3, EXCSAVE_1 # save a3
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
/* Check for kernel double exception (usually fatal). */
- rsr a3, PS
- _bbci.l a3, PS_UM_SHIFT, .Lksp
+ rsr a2, ps
+ _bbci.l a2, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
- xsr a0, DEPC # get DEPC, save a0
+ xsr a0, depc # get DEPC, save a0
- movi a3, XCHAL_WINDOW_VECTORS_VADDR
- _bltu a0, a3, .Lfixup
- addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE
- _bgeu a0, a3, .Lfixup
+ movi a2, WINDOW_VECTORS_VADDR
+ _bltu a0, a2, .Lfixup
+ addi a2, a2, WINDOW_VECTORS_SIZE
+ _bgeu a0, a2, .Lfixup
/* Window overflow/underflow exception. Get stack pointer. */
- mov a3, a2
- movi a2, exc_table
- l32i a2, a2, EXC_TABLE_KSTK
+ l32i a2, a3, EXC_TABLE_KSTK
/* Check for overflow/underflow exception, jump if overflow. */
- _bbci.l a0, 6, .Lovfl
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
- /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
-
- /* Restart window underflow exception.
+ /*
+ * Restart window underflow exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = excsave_1
+ * excsave_1 = orig a3
+ *
* We return to the instruction in user space that caused the window
* underflow exception. Therefore, we change window base to the value
* before we entered the window underflow exception and prepare the
@@ -242,39 +253,69 @@ ENTRY(_DoubleExceptionVector)
* by changing depc (in a0).
* Note: We can trash the current window frame (a0...a3) and depc!
*/
-
- wsr a2, DEPC # save stack pointer temporarily
- rsr a0, PS
- extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
- wsr a0, WINDOWBASE
+_DoubleExceptionVector_WindowUnderflow:
+ xsr a3, excsave1
+ wsr a2, depc # save stack pointer temporarily
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ wsr a0, windowbase
rsync
/* We are now in the previous window frame. Save registers again. */
- xsr a2, DEPC # save a2 and get stack pointer
+ xsr a2, depc # save a2 and get stack pointer
s32i a0, a2, PT_AREG0
-
- wsr a3, EXCSAVE_1 # save a3
- movi a3, exc_table
-
- rsr a0, EXCCAUSE
+ xsr a3, excsave1
+ rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3
+ xsr a3, excsave1
l32i a0, a0, EXC_TABLE_FAST_USER
jx a0
-.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+ /*
+ * We only allow the ITLB miss exception if we are in kernel space.
+ * All other exceptions are unexpected and thus unrecoverable!
+ */
+
+#ifdef CONFIG_MMU
+ .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+ rsr a3, exccause
+ beqi a3, EXCCAUSE_ITLB_MISS, 1f
+ addi a3, a3, -EXCCAUSE_DTLB_MISS
+ bnez a3, .Lunrecoverable
+1: movi a3, fast_second_level_miss_double_kernel
+ jx a3
+#else
+.equ .Lksp, .Lunrecoverable
+#endif
+
+ /* Critical! We can't handle this situation. PANIC! */
+
+ .extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+
+.Lunrecoverable:
+ rsr a3, excsave1
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
- /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
- movi a3, exc_table
- s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
/* Enter critical section. */
l32i a2, a3, EXC_TABLE_FIXUP
s32i a3, a3, EXC_TABLE_FIXUP
- beq a2, a3, .Lunrecoverable_fixup # critical!
+ beq a2, a3, .Lunrecoverable_fixup # critical section
beqz a2, .Ldflt # no handler was registered
/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
@@ -283,59 +324,265 @@ ENTRY(_DoubleExceptionVector)
.Ldflt: /* Get stack pointer. */
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
- addi a2, a3, -PT_USER_SIZE
-
-.Lovfl: /* Jump to default handlers. */
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ addi a2, a2, -PT_USER_SIZE
- /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+ /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
- xsr a3, DEPC
s32i a0, a2, PT_DEPC
- s32i a3, a2, PT_AREG0
+ l32i a0, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+ s32i a0, a2, PT_AREG0
- /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
+ /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
- movi a3, exc_table
- rsr a0, EXCCAUSE
+ rsr a0, exccause
addx4 a0, a0, a3
+ xsr a3, excsave1
l32i a0, a0, EXC_TABLE_FAST_USER
jx a0
/*
- * We only allow the ITLB miss exception if we are in kernel space.
- * All other exceptions are unexpected and thus unrecoverable!
+ * Restart window OVERFLOW exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = EXCSAVE_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * overflow exception. Therefore, we change window base to the value
+ * before we entered the window overflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing DEPC (in a0).
+ *
+ * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+ * can clobber depc.
+ *
+ * The tricky part here is that overflow8 and overflow12 handlers
+ * save a0, then clobber a0. To restart the handler, we have to restore
+ * a0 if the double exception was past the point where a0 was clobbered.
+ *
+ * To keep things simple, we take advantage of the fact that all overflow
+ * handlers save a0 in their very first instruction. If DEPC was past
+ * that instruction, we can safely restore a0 from where it was saved
+ * on the stack.
+ *
+ * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
*/
+_DoubleExceptionVector_WindowOverflow:
+ extui a2, a0, 0, 6 # get offset into 64-byte vector handler
+ beqz a2, 1f # if at start of vector, don't restore
- .extern fast_second_level_miss_double_kernel
+ addi a0, a0, -128
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
-.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
- rsr a3, EXCCAUSE
- beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
- addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
- bnez a3, .Lunrecoverable
-1: movi a3, fast_second_level_miss_double_kernel
- jx a3
+ bbsi.l a0, 7, 2f
- /* Critical! We can't handle this situation. PANIC! */
+ /*
+ * Restore a0 as saved by _WindowOverflow8().
+ */
- .extern unrecoverable_exception
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
+
+2:
+ /*
+ * Restore a0 as saved by _WindowOverflow12().
+ */
+
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+1:
+ /*
+ * Restore WindowBase while leaving all address registers restored.
+ * We have to use ROTW for this, because WSR.WINDOWBASE requires
+ * an address register (which would prevent restore).
+ *
+ * Window Base goes from 0 ... 7 (modulo 8)
+ * Window Start is 8 bits; e.g. a series of call4s yields (0b0101 0101) = 0x55
+ */
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
-.Lunrecoverable_fixup:
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
- xsr a0, DEPC
+ xsr a3, excsave1
+ beqi a0, 1, .L1pane
+ beqi a0, 3, .L3pane
-.Lunrecoverable:
- rsr a3, EXCSAVE_1
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
+ rsr a0, depc
+ rotw -2
- .end literal_prefix
+ /*
+ * We are now in the user code's original window frame.
+ * Process the exception as a user exception as if it was
+ * taken by the user code.
+ *
+ * This is similar to the user exception vector,
+ * except that PT_DEPC isn't set to EXCCAUSE.
+ */
+1:
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+ rsr a0, exccause
+ s32i a0, a2, PT_DEPC
+
+_DoubleExceptionVector_handle_exception:
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ xsr a3, excsave1
+ jx a0
+
+.L1pane:
+ rsr a0, depc
+ rotw -1
+ j 1b
+
+.L3pane:
+ rsr a0, depc
+ rotw -3
+ j 1b
+
+
+ENDPROC(_DoubleExceptionVector)
/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+ /* Need to preserve a0 value here to be able to handle exception
+ * that may occur on a0 reload from stack. It may occur because
+ * TLB miss handler may not be atomic and pointer to page table
+ * may be lost before we get here. There are no free registers,
+ * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+ */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+ .end literal_prefix
+/*
* Debug interrupt vector
*
* There is not much space here, so simply jump to another handler.
@@ -345,9 +592,49 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector)
- xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+
+ xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0
+ENDPROC(_DebugInterruptVector)
+
+
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space. With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the IRQ handling time.
+ */
+
+ .macro irq_entry_level level
+
+ .if XCHAL_EXCM_LEVEL >= \level
+ .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+ wsr a0, excsave2
+ rsr a0, epc\level
+ wsr a0, epc1
+ movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ wsr a0, exccause
+ rsr a0, eps\level
+ # branch to user or kernel vector
+ j _SimulateUserKernelVectorException
+ .endif
+
+ .endm
+
+ irq_entry_level 2
+ irq_entry_level 3
+ irq_entry_level 4
+ irq_entry_level 5
+ irq_entry_level 6
/* Window overflow and underflow handlers.
@@ -359,38 +646,61 @@ ENTRY(_DebugInterruptVector)
* we try to access any page that would cause a page fault early.
*/
+#define ENTRY_ALIGN64(name) \
+ .globl name; \
+ .align 64; \
+ name:
+
.section .WindowVectors.text, "ax"
/* 4-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
s32e a0, a5, -16
s32e a1, a5, -12
s32e a2, a5, -8
s32e a3, a5, -4
rfwo
+ENDPROC(_WindowOverflow4)
+
+
+#if XCHAL_EXCM_LEVEL >= 2
+ /* Not a window vector - but a convenient location
+ * (where we know there's space) for continuation of
+ * medium priority interrupt dispatch code.
+ * On entry here, a0 contains PS, and EPC2 contains saved a0:
+ */
+ .align 4
+_SimulateUserKernelVectorException:
+ addi a0, a0, (1 << PS_EXCM_BIT)
+ wsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
+ rsr a0, excsave2 # restore a0
+ j _KernelExceptionVector # simulate kernel vector exception
+1: rsr a0, excsave2 # restore a0
+ j _UserExceptionVector # simulate user vector exception
+#endif
+
/* 4-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
l32e a0, a5, -16
l32e a1, a5, -12
l32e a2, a5, -8
l32e a3, a5, -4
rfwu
+ENDPROC(_WindowUnderflow4)
/* 8-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
s32e a0, a9, -16
l32e a0, a1, -12
s32e a2, a9, -8
@@ -402,11 +712,12 @@ _WindowOverflow8:
s32e a7, a0, -20
rfwo
+ENDPROC(_WindowOverflow8)
+
/* 8-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
l32e a1, a9, -12
l32e a0, a9, -16
l32e a7, a1, -12
@@ -418,12 +729,12 @@ _WindowUnderflow8:
l32e a7, a7, -20
rfwu
+ENDPROC(_WindowUnderflow8)
/* 12-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
s32e a0, a13, -16
l32e a0, a1, -12
s32e a1, a13, -12
@@ -439,11 +750,12 @@ _WindowOverflow12:
s32e a11, a0, -20
rfwo
+ENDPROC(_WindowOverflow12)
+
/* 12-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
l32e a1, a13, -12
l32e a0, a13, -16
l32e a11, a1, -12
@@ -459,6 +771,6 @@ _WindowUnderflow12:
l32e a11, a11, -20
rfwu
- .text
-
+ENDPROC(_WindowUnderflow12)
+ .text
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ab6cdbd5eb6..d16db6df86f 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -15,20 +15,24 @@
*/
#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
-#define _NOCLANGUAGE
-#include <xtensa/config/core.h>
-#include <xtensa/config/system.h>
+#include <asm/vectors.h>
+#include <variant/core.h>
+#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
-#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
+#ifdef __XTENSA_EB__
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
-#define KERNELOFFSET 0x1000
+#ifndef KERNELOFFSET
+#define KERNELOFFSET 0xd0003000
+#endif
/* Note: In the following macros, it would be nice to specify only the
vector name and section kind and construct "sym" and "section" using
@@ -75,19 +79,17 @@ jiffies = jiffies_64;
SECTIONS
{
- . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
+ . = KERNELOFFSET;
/* .text section */
_text = .;
_stext = .;
- _ftext = .;
.text :
{
- /* The .head.text section must be the first section! */
- *(.head.text)
- *(.literal .text)
- *(.srom.text)
+ /* The HEAD_TEXT section must be the first section! */
+ HEAD_TEXT
+ TEXT_TEXT
VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .;
@@ -97,6 +99,7 @@ SECTIONS
}
_etext = .;
+ PROVIDE (etext = .);
. = ALIGN(16);
@@ -104,117 +107,82 @@ SECTIONS
/* Relocation table */
- . = ALIGN(16);
- __boot_reloc_table_start = ABSOLUTE(.);
-
- __relocate : {
-
- RELOCATE_ENTRY(_WindowVectors_text,
- .WindowVectors.text);
-#if 0
- RELOCATE_ENTRY(_KernelExceptionVector_literal,
- .KernelExceptionVector.literal);
-#endif
- RELOCATE_ENTRY(_KernelExceptionVector_text,
- .KernelExceptionVector.text);
-#if 0
- RELOCATE_ENTRY(_UserExceptionVector_literal,
- .UserExceptionVector.literal);
-#endif
- RELOCATE_ENTRY(_UserExceptionVector_text,
- .UserExceptionVector.text);
- RELOCATE_ENTRY(_DoubleExceptionVector_literal,
- .DoubleExceptionVector.literal);
- RELOCATE_ENTRY(_DoubleExceptionVector_text,
- .DoubleExceptionVector.text);
- }
- __boot_reloc_table_end = ABSOLUTE(.) ;
-
.fixup : { *(.fixup) }
- . = ALIGN(16);
-
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
-
+ EXCEPTION_TABLE(16)
/* Data section */
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
- _fdata = .;
- .data :
- {
- *(.data) CONSTRUCTORS
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
- *(.data.cacheline_aligned)
- }
-
+ _sdata = .;
+ RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
- /* The initial task */
- . = ALIGN(8192);
- .data.init_task : { *(.data.init_task) }
-
/* Initialization code and data: */
- . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : {
- _sinittext = .;
- *(.init.literal) *(.init.text)
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
.init.data :
{
- *(.init.data)
+ INIT_DATA
. = ALIGN(0x4);
__tagtable_begin = .;
*(.taglist)
__tagtable_end = .;
- }
-
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
-
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
+ . = ALIGN(16);
+ __boot_reloc_table_start = ABSOLUTE(.);
- SECURITY_INIT
-
- . = ALIGN(4);
+ RELOCATE_ENTRY(_WindowVectors_text,
+ .WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+ RELOCATE_ENTRY(_Level2InterruptVector_text,
+ .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ RELOCATE_ENTRY(_Level3InterruptVector_text,
+ .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ RELOCATE_ENTRY(_Level4InterruptVector_text,
+ .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ RELOCATE_ENTRY(_Level5InterruptVector_text,
+ .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ RELOCATE_ENTRY(_Level6InterruptVector_text,
+ .Level6InterruptVector.text);
+#endif
+ RELOCATE_ENTRY(_KernelExceptionVector_text,
+ .KernelExceptionVector.text);
+ RELOCATE_ENTRY(_UserExceptionVector_text,
+ .UserExceptionVector.text);
+ RELOCATE_ENTRY(_DoubleExceptionVector_literal,
+ .DoubleExceptionVector.literal);
+ RELOCATE_ENTRY(_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text);
+ RELOCATE_ENTRY(_DebugInterruptVector_text,
+ .DebugInterruptVector.text);
+#if defined(CONFIG_SMP)
+ RELOCATE_ENTRY(_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal);
+ RELOCATE_ENTRY(_SecondaryResetVector_text,
+ .SecondaryResetVector.text);
+#endif
- __start___ftr_fixup = .;
- __ftr_fixup : { *(__ftr_fixup) }
- __stop___ftr_fixup = .;
+
+ __boot_reloc_table_end = ABSOLUTE(.) ;
- . = ALIGN(32);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+ }
- . = ALIGN(4096);
- __initramfs_start =.;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
/* We need this dummy segment here */
@@ -225,61 +193,117 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- XCHAL_WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR, 4,
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
- XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
+ DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
- XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
+ DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
+#undef LAST
+#define LAST .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR (_Level2InterruptVector_text,
+ .Level2InterruptVector.text,
+ INTLEVEL2_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR (_Level3InterruptVector_text,
+ .Level3InterruptVector.text,
+ INTLEVEL3_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR (_Level4InterruptVector_text,
+ .Level4InterruptVector.text,
+ INTLEVEL4_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR (_Level5InterruptVector_text,
+ .Level5InterruptVector.text,
+ INTLEVEL5_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR (_Level6InterruptVector_text,
+ .Level6InterruptVector.text,
+ INTLEVEL6_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level6InterruptVector.text
+#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
- XCHAL_KERNELEXC_VECTOR_VADDR - 4,
- SIZEOF(.DebugInterruptVector.text),
- .DebugInterruptVector.text)
+ KERNEL_VECTOR_VADDR - 4,
+ SIZEOF(LAST), LAST)
+#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
- XCHAL_KERNELEXC_VECTOR_VADDR,
+ KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
- XCHAL_USEREXC_VECTOR_VADDR - 4,
+ USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
- XCHAL_USEREXC_VECTOR_VADDR,
+ USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
- XCHAL_DOUBLEEXC_VECTOR_VADDR,
- 32,
+ DOUBLEEXC_VECTOR_VADDR,
+ 40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
- . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
+
+#if defined(CONFIG_SMP)
+
+ SECTION_VECTOR (_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal,
+ RESET_VECTOR1_VADDR - 4,
+ SIZEOF(.DoubleExceptionVector.text),
+ .DoubleExceptionVector.text)
+
+ SECTION_VECTOR (_SecondaryResetVector_text,
+ .SecondaryResetVector.text,
+ RESET_VECTOR1_VADDR,
+ 4,
+ .SecondaryResetVector.literal)
+
+ . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+
+#endif
+
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
- . = ALIGN(8192);
+ BSS_SECTION(0, 8192, 0)
- /* BSS section */
- _bss_start = .;
- .sbss : { *(.sbss) *(.scommon) }
- .bss : { *(COMMON) *(.bss) }
- _bss_end = .;
_end = .;
/* only used by the boot loader */
@@ -287,27 +311,29 @@ SECTIONS
. = ALIGN(0x10);
.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
- . = ALIGN(0x1000);
- __initrd_start = .;
- .initrd : { *(.initrd) }
- __initrd_end = .;
-
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ .ResetVector.text RESET_VECTOR_VADDR :
{
*(.ResetVector.text)
}
- /* Sections to be discarded */
- /DISCARD/ :
+ /*
+ * This is a remapped copy of the Secondary Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ *
+ * Only debug information about this section is put in the kernel image.
+ */
+ .SecondaryResetVector.remapped_text 0x46000000 (INFO):
{
- *(.text.exit)
- *(.text.exit.literal)
- *(.data.exit)
- *(.exitcall.exit)
+ *(.SecondaryResetVector.remapped_text)
}
+ .xt.lit : { *(.xt.lit) }
+ .xt.prop : { *(.xt.prop) }
+
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
.debug_srcinfo 0 : { *(.debug_srcinfo) }
@@ -337,4 +363,8 @@ SECTIONS
*(.xt.lit)
*(.gnu.linkonce.p*)
}
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : { *(.exit.literal) }
}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 0b4cb93db5a..4d2872fd9bb 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -18,16 +18,15 @@
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/in6.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/semaphore.h>
+#include <asm/ftrace.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>
#endif
@@ -39,22 +38,14 @@
/*
* String functions
*/
-EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(__strncpy_user);
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(empty_zero_page);
/*
* gcc internal math functions
@@ -70,6 +61,7 @@ extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+extern int __ucmpdi2(int, int);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
@@ -82,19 +74,31 @@ EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__ucmpdi2);
-/*
- * Semaphore operations
- */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
+void __xtensa_libgcc_window_spill(void)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
+
+unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_and_4);
+
+unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_or_4);
#ifdef CONFIG_NET
/*
* Networking support
*/
+EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif /* CONFIG_NET */
@@ -102,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
/*
* Kernel hacking ...
@@ -117,3 +122,15 @@ EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
+
+extern long common_exception_return;
+EXPORT_SYMBOL(common_exception_return);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
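
The BUG() stubs and libgcc exports above exist because gcc lowers some 64-bit arithmetic on Xtensa to out-of-line helper calls, which modules then need resolved at load time. A hypothetical module-side illustration; no helper is called by name, the compiler inserts the calls:

/* Hypothetical module code: plain 64-bit C operations can compile
 * into calls to the libgcc helpers exported above. */
static unsigned long long scale_bytes(unsigned long long bytes)
{
	return bytes / 1000ULL;		/* may lower to __udivdi3 */
}

static int is_before(unsigned long long a, unsigned long long b)
{
	return a < b;			/* may lower to __ucmpdi2 */
}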