author		Steven Whitehouse <swhiteho@redhat.com>	2006-07-03 10:25:08 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-07-03 10:25:08 -0400
commit		0a1340c185734a57fbf4775927966ad4a1347b02 (patch)
tree		d9ed8f0dd809a7c542a3356601125ea5b5aaa804 /arch/arm/kernel
parent		af18ddb8864b096e3ed4732e2d4b21c956dcfe3a (diff)
parent		29454dde27d8e340bb1987bad9aa504af7081eba (diff)
Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
include/linux/kernel.h
Diffstat (limited to 'arch/arm/kernel')
28 files changed, 820 insertions(+), 1183 deletions(-)
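Review note: a recurring theme in the hunks below (entry-armv.S, process.c, crunch.c, iwmmxt-notifier.c) is the replacement of hard-wired per-coprocessor hooks with a generic atomic notifier chain, thread_notify_head, fired when a thread is flushed, released, or switched in. A minimal sketch of how a subsystem registers with that chain, modeled directly on the crunch.c code in this diff — the my_coproc_* names are hypothetical stand-ins:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <asm/thread_notify.h>

	/* hypothetical per-subsystem helpers */
	static void my_coproc_release(struct thread_info *thread) { }
	static void my_coproc_switch(struct thread_info *thread) { }

	static int my_coproc_do(struct notifier_block *self, unsigned long cmd, void *t)
	{
		struct thread_info *thread = t;

		switch (cmd) {
		case THREAD_NOTIFY_FLUSH:
			/*
			 * flush_thread() has just zeroed the saved state;
			 * fall through so any live ownership is dropped too.
			 */
		case THREAD_NOTIFY_RELEASE:
			my_coproc_release(thread);
			break;

		case THREAD_NOTIFY_SWITCH:
			my_coproc_switch(thread);
			break;
		}

		return NOTIFY_DONE;
	}

	static struct notifier_block my_coproc_notifier = {
		.notifier_call = my_coproc_do,
	};

	static int __init my_coproc_init(void)
	{
		thread_register_notifier(&my_coproc_notifier);
		return 0;
	}
	late_initcall(my_coproc_init);

The SWITCH notification is raised from __switch_to (see the entry-armv.S hunk), which is why crunch.c and iwmmxt-notifier.c can keep their coprocessors lazily disabled without any special-case code in the context switch path.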
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a601b8b55f3..f0c0cdb1c18 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -22,7 +22,10 @@ obj-$(CONFIG_PCI) += bios32.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o -obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o +AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 + +obj-$(CONFIG_IWMMXT) += iwmmxt.o iwmmxt-notifier.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt ifneq ($(CONFIG_ARCH_EBSA110),y) diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c index 2bed290fec7..33c55689f99 100644 --- a/arch/arm/kernel/apm.c +++ b/arch/arm/kernel/apm.c @@ -10,7 +10,6 @@ * [This document is available from Microsoft at: * http://www.microsoft.com/hwdev/busbios/amp_12.htm] */ -#include <linux/config.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/timer.h> diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index c49b5d4d7fc..da69e660574 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(__memzero); /* user mem (segment) */ -EXPORT_SYMBOL(__arch_copy_from_user); -EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__arch_clear_user); -EXPORT_SYMBOL(__arch_strnlen_user); -EXPORT_SYMBOL(__arch_strncpy_from_user); +EXPORT_SYMBOL(__strnlen_user); +EXPORT_SYMBOL(__strncpy_from_user); + +#ifdef CONFIG_MMU +EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); @@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); +#endif /* crypto hash */ EXPORT_SYMBOL(sha_transform); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 396efba9bac..cc2d58d028e 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -60,6 +60,9 @@ int main(void) #ifdef CONFIG_IWMMXT DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); #endif +#ifdef CONFIG_CRUNCH + DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate)); +#endif BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); @@ -102,6 +105,7 @@ int main(void) BLANK(); DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); - DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags)); + DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags)); + DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags)); return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index de606dfa8db..964faac104f 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -5,7 +5,6 @@ * * Bits taken from various places. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> @@ -304,7 +303,7 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev) static void __devinit pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev) { - unsigned long offset; + resource_size_t offset; int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { @@ -634,9 +633,9 @@ char * __init pcibios_setup(char *str) * which might be mirrored at 0x0100-0x03ff.. 
*/ void pcibios_align_resource(void *data, struct resource *res, - unsigned long size, unsigned long align) + resource_size_t size, resource_size_t align) { - unsigned long start = res->start; + resource_size_t start = res->start; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; @@ -702,7 +701,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, /* * Mark this as IO */ - vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, vma->vm_start, phys, diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c index 60cfa7f3226..0a1385442f4 100644 --- a/arch/arm/kernel/compat.c +++ b/arch/arm/kernel/compat.c @@ -15,7 +15,6 @@ * the kernel for 5 years from now (2001). This will allow boot loaders * to convert to the new struct tag way. */ -#include <linux/config.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S new file mode 100644 index 00000000000..a26886758c6 --- /dev/null +++ b/arch/arm/kernel/crunch-bits.S @@ -0,0 +1,305 @@ +/* + * arch/arm/kernel/crunch-bits.S + * Cirrus MaverickCrunch context switching and handling + * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * + * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is + * Copyright (c) 2003-2004, MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/linkage.h> +#include <asm/ptrace.h> +#include <asm/thread_info.h> +#include <asm/asm-offsets.h> +#include <asm/arch/ep93xx-regs.h> + +/* + * We can't use hex constants here due to a bug in gas. + */ +#define CRUNCH_MVDX0 0 +#define CRUNCH_MVDX1 8 +#define CRUNCH_MVDX2 16 +#define CRUNCH_MVDX3 24 +#define CRUNCH_MVDX4 32 +#define CRUNCH_MVDX5 40 +#define CRUNCH_MVDX6 48 +#define CRUNCH_MVDX7 56 +#define CRUNCH_MVDX8 64 +#define CRUNCH_MVDX9 72 +#define CRUNCH_MVDX10 80 +#define CRUNCH_MVDX11 88 +#define CRUNCH_MVDX12 96 +#define CRUNCH_MVDX13 104 +#define CRUNCH_MVDX14 112 +#define CRUNCH_MVDX15 120 +#define CRUNCH_MVAX0L 128 +#define CRUNCH_MVAX0M 132 +#define CRUNCH_MVAX0H 136 +#define CRUNCH_MVAX1L 140 +#define CRUNCH_MVAX1M 144 +#define CRUNCH_MVAX1H 148 +#define CRUNCH_MVAX2L 152 +#define CRUNCH_MVAX2M 156 +#define CRUNCH_MVAX2H 160 +#define CRUNCH_MVAX3L 164 +#define CRUNCH_MVAX3M 168 +#define CRUNCH_MVAX3H 172 +#define CRUNCH_DSPSC 176 + +#define CRUNCH_SIZE 184 + + .text + +/* + * Lazy switching of crunch coprocessor context + * + * r10 = struct thread_info pointer + * r9 = ret_from_exception + * lr = undefined instr exit + * + * called from prefetch exception handler with interrupts disabled + */ +ENTRY(crunch_task_enable) + ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr + + ldr r1, [r8, #0x80] + tst r1, #0x00800000 @ access to crunch enabled? 
+ movne pc, lr @ if so no business here + mov r3, #0xaa @ unlock syscon swlock + str r3, [r8, #0xc0] + orr r1, r1, #0x00800000 @ enable access to crunch + str r1, [r8, #0x80] + + ldr r3, =crunch_owner + add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area + ldr r2, [sp, #60] @ current task pc value + ldr r1, [r3] @ get current crunch owner + str r0, [r3] @ this task now owns crunch + sub r2, r2, #4 @ adjust pc back + str r2, [sp, #60] + + ldr r2, [r8, #0x80] + mov r2, r2 @ flush out enable (@@@) + + teq r1, #0 @ test for last ownership + mov lr, r9 @ normal exit from exception + beq crunch_load @ no owner, skip save + +crunch_save: + cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers + cfstr64 mvdx1, [r1, #CRUNCH_MVDX1] + cfstr64 mvdx2, [r1, #CRUNCH_MVDX2] + cfstr64 mvdx3, [r1, #CRUNCH_MVDX3] + cfstr64 mvdx4, [r1, #CRUNCH_MVDX4] + cfstr64 mvdx5, [r1, #CRUNCH_MVDX5] + cfstr64 mvdx6, [r1, #CRUNCH_MVDX6] + cfstr64 mvdx7, [r1, #CRUNCH_MVDX7] + cfstr64 mvdx8, [r1, #CRUNCH_MVDX8] + cfstr64 mvdx9, [r1, #CRUNCH_MVDX9] + cfstr64 mvdx10, [r1, #CRUNCH_MVDX10] + cfstr64 mvdx11, [r1, #CRUNCH_MVDX11] + cfstr64 mvdx12, [r1, #CRUNCH_MVDX12] + cfstr64 mvdx13, [r1, #CRUNCH_MVDX13] + cfstr64 mvdx14, [r1, #CRUNCH_MVDX14] + cfstr64 mvdx15, [r1, #CRUNCH_MVDX15] + +#ifdef __ARMEB__ +#error fix me for ARMEB +#endif + + cfmv32al mvfx0, mvax0 @ save 72b accumulators + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L] + cfmv32am mvfx0, mvax0 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M] + cfmv32ah mvfx0, mvax0 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H] + cfmv32al mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L] + cfmv32am mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M] + cfmv32ah mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H] + cfmv32al mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L] + cfmv32am mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M] + cfmv32ah mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H] + cfmv32al mvfx0, mvax3 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L] + cfmv32am mvfx0, mvax3 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M] + cfmv32ah mvfx0, mvax3 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H] + + cfmv32sc mvdx0, dspsc @ save status word + cfstr64 mvdx0, [r1, #CRUNCH_DSPSC] + + teq r0, #0 @ anything to load? 
+ cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered + moveq pc, lr + +crunch_load: + cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word + cfmvsc32 dspsc, mvdx0 + + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators + cfmval32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M] + cfmvam32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H] + cfmvah32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L] + cfmval32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M] + cfmvam32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H] + cfmvah32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L] + cfmval32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M] + cfmvam32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H] + cfmvah32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L] + cfmval32 mvax3, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M] + cfmvam32 mvax3, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H] + cfmvah32 mvax3, mvfx0 + + cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers + cfldr64 mvdx1, [r0, #CRUNCH_MVDX1] + cfldr64 mvdx2, [r0, #CRUNCH_MVDX2] + cfldr64 mvdx3, [r0, #CRUNCH_MVDX3] + cfldr64 mvdx4, [r0, #CRUNCH_MVDX4] + cfldr64 mvdx5, [r0, #CRUNCH_MVDX5] + cfldr64 mvdx6, [r0, #CRUNCH_MVDX6] + cfldr64 mvdx7, [r0, #CRUNCH_MVDX7] + cfldr64 mvdx8, [r0, #CRUNCH_MVDX8] + cfldr64 mvdx9, [r0, #CRUNCH_MVDX9] + cfldr64 mvdx10, [r0, #CRUNCH_MVDX10] + cfldr64 mvdx11, [r0, #CRUNCH_MVDX11] + cfldr64 mvdx12, [r0, #CRUNCH_MVDX12] + cfldr64 mvdx13, [r0, #CRUNCH_MVDX13] + cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] + cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] + + mov pc, lr + +/* + * Back up crunch regs to save area and disable access to them + * (mainly for gdb or sleep mode usage) + * + * r0 = struct thread_info pointer of target task or NULL for any + */ +ENTRY(crunch_task_disable) + stmfd sp!, {r4, r5, lr} + + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r1, [r3] @ get current crunch owner + teq r1, #0 @ any current owner? + beq 1f @ no: quit + teq r0, #0 @ any owner? + teqne r1, r2 @ or specified one? + bne 1f @ no: quit + + ldr r5, [r4, #0x80] @ enable access to crunch + mov r2, #0xaa + str r2, [r4, #0xc0] + orr r5, r5, #0x00800000 + str r5, [r4, #0x80] + + mov r0, #0 @ nothing to load + str r0, [r3] @ no more current owner + ldr r2, [r4, #0x80] @ flush out enable (@@@) + mov r2, r2 + bl crunch_save + + mov r2, #0xaa @ disable access to crunch + str r2, [r4, #0xc0] + bic r5, r5, #0x00800000 + str r5, [r4, #0x80] + ldr r5, [r4, #0x80] @ flush out enable (@@@) + mov r5, r5 + +1: msr cpsr_c, ip @ restore interrupt mode + ldmfd sp!, {r4, r5, pc} + +/* + * Copy crunch state to given memory address + * + * r0 = struct thread_info pointer of target task + * r1 = memory address where to store crunch state + * + * this is called mainly in the creation of signal stack frames + */ +ENTRY(crunch_task_copy) + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r3, [r3] @ get current crunch owner + teq r2, r3 @ does this task own it... 
+ beq 1f + + @ current crunch values are in the task save area + msr cpsr_c, ip @ restore interrupt mode + mov r0, r1 + mov r1, r2 + mov r2, #CRUNCH_SIZE + b memcpy + +1: @ this task owns crunch regs -- grab a copy from there + mov r0, #0 @ nothing to load + mov r3, lr @ preserve return address + bl crunch_save + msr cpsr_c, ip @ restore interrupt mode + mov pc, r3 + +/* + * Restore crunch state from given memory address + * + * r0 = struct thread_info pointer of target task + * r1 = memory address where to get crunch state from + * + * this is used to restore crunch state when unwinding a signal stack frame + */ +ENTRY(crunch_task_restore) + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r3, [r3] @ get current crunch owner + teq r2, r3 @ does this task own it... + beq 1f + + @ this task doesn't own crunch regs -- use its save area + msr cpsr_c, ip @ restore interrupt mode + mov r0, r2 + mov r2, #CRUNCH_SIZE + b memcpy + +1: @ this task owns crunch regs -- load them directly + mov r0, r1 + mov r1, #0 @ nothing to save + mov r3, lr @ preserve return address + bl crunch_load + msr cpsr_c, ip @ restore interrupt mode + mov pc, r3 diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c new file mode 100644 index 00000000000..748175921f9 --- /dev/null +++ b/arch/arm/kernel/crunch.c @@ -0,0 +1,83 @@ +/* + * arch/arm/kernel/crunch.c + * Cirrus MaverickCrunch context switching and handling + * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <asm/arch/ep93xx-regs.h> +#include <asm/thread_notify.h> +#include <asm/io.h> + +struct crunch_state *crunch_owner; + +void crunch_task_release(struct thread_info *thread) +{ + local_irq_disable(); + if (crunch_owner == &thread->crunchstate) + crunch_owner = NULL; + local_irq_enable(); +} + +static int crunch_enabled(u32 devcfg) +{ + return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE); +} + +static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) +{ + struct thread_info *thread = (struct thread_info *)t; + struct crunch_state *crunch_state; + u32 devcfg; + + crunch_state = &thread->crunchstate; + + switch (cmd) { + case THREAD_NOTIFY_FLUSH: + memset(crunch_state, 0, sizeof(*crunch_state)); + + /* + * FALLTHROUGH: Ensure we don't try to overwrite our newly + * initialised state information on the first fault. 
+ */ + + case THREAD_NOTIFY_RELEASE: + crunch_task_release(thread); + break; + + case THREAD_NOTIFY_SWITCH: + devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG); + if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { + devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE; + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); + __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG); + } + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block crunch_notifier_block = { + .notifier_call = crunch_do, +}; + +static int __init crunch_init(void) +{ + thread_register_notifier(&crunch_notifier_block); + + return 0; +} + +late_initcall(crunch_init); diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index da280bae3d0..a5747e58a9d 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -9,7 +9,6 @@ * * 32-bit debugging code */ -#include <linux/config.h> #include <linux/linkage.h> .text diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 00aa225e8d9..ab4ad9562ee 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c @@ -27,7 +27,6 @@ */ #define ECARD_C -#include <linux/config.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> @@ -620,7 +619,7 @@ ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg ecard_t *ec = slot_to_ecard(slot); if (ec->claimed) { - struct irqdesc *d = irqdesc + ec->irq; + struct irq_desc *d = irq_desc + ec->irq; /* * this ugly code is so that we can operate a * prioritorising system: diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index ab8e600c18c..7ea5f01dfc7 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -14,12 +14,12 @@ * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes * it to save wrong values... Be aware! */ -#include <linux/config.h> #include <asm/memory.h> #include <asm/glue.h> #include <asm/vfpmacros.h> #include <asm/arch/entry-macro.S> +#include <asm/thread_notify.h> #include "entry-header.S" @@ -491,9 +491,15 @@ call_fpe: b do_fpe @ CP#1 (FPE) b do_fpe @ CP#2 (FPE) mov pc, lr @ CP#3 +#ifdef CONFIG_CRUNCH + b crunch_task_enable @ CP#4 (MaverickCrunch) + b crunch_task_enable @ CP#5 (MaverickCrunch) + b crunch_task_enable @ CP#6 (MaverickCrunch) +#else mov pc, lr @ CP#4 mov pc, lr @ CP#5 mov pc, lr @ CP#6 +#endif mov pc, lr @ CP#7 mov pc, lr @ CP#8 mov pc, lr @ CP#9 @@ -560,10 +566,8 @@ ENTRY(__switch_to) add ip, r1, #TI_CPU_SAVE ldr r3, [r2, #TI_TP_VALUE] stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack -#ifndef CONFIG_MMU - add r2, r2, #TI_CPU_DOMAIN -#else - ldr r6, [r2, #TI_CPU_DOMAIN]! +#ifdef CONFIG_MMU + ldr r6, [r2, #TI_CPU_DOMAIN] #endif #if __LINUX_ARM_ARCH__ >= 6 #ifdef CONFIG_CPU_32v6K @@ -585,21 +589,18 @@ ENTRY(__switch_to) #ifdef CONFIG_MMU mcr p15, 0, r6, c3, c0, 0 @ Set domain register #endif -#ifdef CONFIG_VFP - @ Always disable VFP so we can lazily save/restore the old - @ state. This occurs in the context of the previous thread. 
- VFPFMRX r4, FPEXC - bic r4, r4, #FPEXC_ENABLE - VFPFMXR FPEXC, r4 -#endif -#if defined(CONFIG_IWMMXT) - bl iwmmxt_task_switch -#elif defined(CONFIG_CPU_XSCALE) - add r4, r2, #40 @ cpu_context_save->extra +#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT) + add r4, r2, #TI_CPU_DOMAIN + 40 @ cpu_context_save->extra ldmib r4, {r4, r5} mar acc0, r4, r5 #endif - ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously + mov r5, r0 + add r4, r2, #TI_CPU_SAVE + ldr r0, =thread_notify_head + mov r1, #THREAD_NOTIFY_SWITCH + bl atomic_notifier_call_chain + mov r0, r5 + ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously __INIT diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index dbcb11a31f7..6f5e7c50d42 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -7,7 +7,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/config.h> #include <asm/unistd.h> @@ -271,7 +270,7 @@ ENTRY(sys_call_table) @ r8 = syscall table .type sys_syscall, #function sys_syscall: - eor scno, r0, #__NR_OABI_SYSCALL_BASE + bic scno, r0, #__NR_OABI_SYSCALL_BASE cmp scno, #__NR_syscall - __NR_SYSCALL_BASE cmpne scno, #NR_syscalls @ check range stmloia sp, {r5, r6} @ shuffle args @@ -340,7 +339,7 @@ sys_mmap2: streq r5, [sp, #4] beq do_mmap2 mov r0, #-EINVAL - RETINSTR(mov,pc, lr) + mov pc, lr #else str r5, [sp, #4] b do_mmap2 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index f1c2fd5b63e..87ab4e15799 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -1,4 +1,3 @@ -#include <linux/config.h> #include <linux/init.h> #include <linux/linkage.h> diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 1ec3f7faa25..e8e90346f11 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -38,6 +38,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/seq_file.h> #include <asm/cacheflush.h> diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index adf62e5eaad..ac9eb3d3051 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -12,7 +12,6 @@ * for 32-bit CPUs which has a process ID register(CP15). * */ -#include <linux/config.h> #include <linux/linkage.h> #include <linux/init.h> @@ -39,7 +38,7 @@ __INIT .type stext, %function ENTRY(stext) - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode + msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode @ and irqs disabled mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type @ r5=procinfo r9=cpuid diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 04f7344e356..2242f5f7cb7 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -11,7 +11,6 @@ * * Kernel startup code for all 32-bit CPUs */ -#include <linux/config.h> #include <linux/linkage.h> #include <linux/init.h> @@ -71,7 +70,7 @@ __INIT .type stext, %function ENTRY(stext) - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode + msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode @ and irqs disabled mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type @ r5=procinfo r9=cpuid @@ -104,7 +103,7 @@ ENTRY(secondary_startup) * the processor type - there is no need to check the machine type * as it has already been validated by the primary processor. 
*/ - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC + msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type movs r10, r5 @ invalid processor? @@ -221,7 +220,7 @@ __create_page_tables: teq r0, r6 bne 1b - ldr r7, [r10, #PROCINFO_MMUFLAGS] @ mmuflags + ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* * Create identity mapping for first MB of kernel to @@ -272,8 +271,7 @@ __create_page_tables: #endif #ifdef CONFIG_DEBUG_LL - bic r7, r7, #0x0c @ turn off cacheable - @ and bufferable bits + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags /* * Map in IO space for serial debugging. * This allows debug messages to be output diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2d5896b3618..c3d4e94ef5b 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -21,12 +21,12 @@ * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ -#include <linux/config.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/random.h> @@ -38,192 +38,18 @@ #include <linux/kallsyms.h> #include <linux/proc_fs.h> -#include <asm/irq.h> #include <asm/system.h> -#include <asm/mach/irq.h> #include <asm/mach/time.h> /* - * Maximum IRQ count. Currently, this is arbitary. However, it should - * not be set too low to prevent false triggering. Conversely, if it - * is set too high, then you could miss a stuck IRQ. - * - * Maybe we ought to set a timer and re-enable the IRQ at a later time? - */ -#define MAX_IRQ_CNT 100000 - -static int noirqdebug; -static volatile unsigned long irq_err_count; -static DEFINE_SPINLOCK(irq_controller_lock); -static LIST_HEAD(irq_pending); - -struct irqdesc irq_desc[NR_IRQS]; -void (*init_arch_irq)(void) __initdata = NULL; - -/* * No architecture-specific irq_finish function defined in arm/arch/irqs.h. */ #ifndef irq_finish #define irq_finish(irq) do { } while (0) #endif -/* - * Dummy mask/unmask handler - */ -void dummy_mask_unmask_irq(unsigned int irq) -{ -} - -irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs) -{ - return IRQ_NONE; -} - -void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) -{ - irq_err_count += 1; - printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); -} - -static struct irqchip bad_chip = { - .ack = dummy_mask_unmask_irq, - .mask = dummy_mask_unmask_irq, - .unmask = dummy_mask_unmask_irq, -}; - -static struct irqdesc bad_irq_desc = { - .chip = &bad_chip, - .handle = do_bad_IRQ, - .pend = LIST_HEAD_INIT(bad_irq_desc.pend), - .disable_depth = 1, -}; - -#ifdef CONFIG_SMP -void synchronize_irq(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - - while (desc->running) - barrier(); -} -EXPORT_SYMBOL(synchronize_irq); - -#define smp_set_running(desc) do { desc->running = 1; } while (0) -#define smp_clear_running(desc) do { desc->running = 0; } while (0) -#else -#define smp_set_running(desc) do { } while (0) -#define smp_clear_running(desc) do { } while (0) -#endif - -/** - * disable_irq_nosync - disable an irq without waiting - * @irq: Interrupt to disable - * - * Disable the selected interrupt line. Enables and disables - * are nested. We do this lazily. - * - * This function may be called from IRQ context. 
- */ -void disable_irq_nosync(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&irq_controller_lock, flags); - desc->disable_depth++; - list_del_init(&desc->pend); - spin_unlock_irqrestore(&irq_controller_lock, flags); -} -EXPORT_SYMBOL(disable_irq_nosync); - -/** - * disable_irq - disable an irq and wait for completion - * @irq: Interrupt to disable - * - * Disable the selected interrupt line. Enables and disables - * are nested. This functions waits for any pending IRQ - * handlers for this interrupt to complete before returning. - * If you use this function while holding a resource the IRQ - * handler may need you will deadlock. - * - * This function may be called - with care - from IRQ context. - */ -void disable_irq(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - - disable_irq_nosync(irq); - if (desc->action) - synchronize_irq(irq); -} -EXPORT_SYMBOL(disable_irq); - -/** - * enable_irq - enable interrupt handling on an irq - * @irq: Interrupt to enable - * - * Re-enables the processing of interrupts on this IRQ line. - * Note that this may call the interrupt handler, so you may - * get unexpected results if you hold IRQs disabled. - * - * This function may be called from IRQ context. - */ -void enable_irq(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&irq_controller_lock, flags); - if (unlikely(!desc->disable_depth)) { - printk("enable_irq(%u) unbalanced from %p\n", irq, - __builtin_return_address(0)); - } else if (!--desc->disable_depth) { - desc->probing = 0; - desc->chip->unmask(irq); - - /* - * If the interrupt is waiting to be processed, - * try to re-run it. We can't directly run it - * from here since the caller might be in an - * interrupt-protected region. - */ - if (desc->pending && list_empty(&desc->pend)) { - desc->pending = 0; - if (!desc->chip->retrigger || - desc->chip->retrigger(irq)) - list_add(&desc->pend, &irq_pending); - } - } - spin_unlock_irqrestore(&irq_controller_lock, flags); -} -EXPORT_SYMBOL(enable_irq); - -/* - * Enable wake on selected irq - */ -void enable_irq_wake(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&irq_controller_lock, flags); - if (desc->chip->set_wake) - desc->chip->set_wake(irq, 1); - spin_unlock_irqrestore(&irq_controller_lock, flags); -} -EXPORT_SYMBOL(enable_irq_wake); - -void disable_irq_wake(unsigned int irq) -{ - struct irqdesc *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&irq_controller_lock, flags); - if (desc->chip->set_wake) - desc->chip->set_wake(irq, 0); - spin_unlock_irqrestore(&irq_controller_lock, flags); -} -EXPORT_SYMBOL(disable_irq_wake); +void (*init_arch_irq)(void) __initdata = NULL; +unsigned long irq_err_count; int show_interrupts(struct seq_file *p, void *v) { @@ -243,8 +69,8 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_controller_lock, flags); - action = irq_desc[i].action; + spin_lock_irqsave(&irq_desc[i].lock, flags); + action = irq_desc[i].action; if (!action) goto unlock; @@ -257,7 +83,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_controller_lock, flags); + spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { #ifdef CONFIG_ARCH_ACORN show_fiq_list(p, v); @@ -271,267 +97,11 @@ unlock: return 0; } -/* - * IRQ lock detection. 
- * - * Hopefully, this should get us out of a few locked situations. - * However, it may take a while for this to happen, since we need - * a large number if IRQs to appear in the same jiffie with the - * same instruction pointer (or within 2 instructions). - */ -static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs) -{ - unsigned long instr_ptr = instruction_pointer(regs); - - if (desc->lck_jif == jiffies && - desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) { - desc->lck_cnt += 1; - - if (desc->lck_cnt > MAX_IRQ_CNT) { - printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq); - return 1; - } - } else { - desc->lck_cnt = 0; - desc->lck_pc = instruction_pointer(regs); - desc->lck_jif = jiffies; - } - return 0; -} - -static void -report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret) -{ - static int count = 100; - struct irqaction *action; - - if (noirqdebug) - return; - - if (ret != IRQ_HANDLED && ret != IRQ_NONE) { - if (!count) - return; - count--; - printk("irq%u: bogus retval mask %x\n", irq, ret); - } else { - desc->irqs_unhandled++; - if (desc->irqs_unhandled <= 99900) - return; - desc->irqs_unhandled = 0; - printk("irq%u: nobody cared\n", irq); - } - show_regs(regs); - dump_stack(); - printk(KERN_ERR "handlers:"); - action = desc->action; - do { - printk("\n" KERN_ERR "[<%p>]", action->handler); - print_symbol(" (%s)", (unsigned long)action->handler); - action = action->next; - } while (action); - printk("\n"); -} - -static int -__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs) -{ - unsigned int status; - int ret, retval = 0; - - spin_unlock(&irq_controller_lock); - -#ifdef CONFIG_NO_IDLE_HZ - if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) { - write_seqlock(&xtime_lock); - if (system_timer->dyn_tick->state & DYN_TICK_ENABLED) - system_timer->dyn_tick->handler(irq, 0, regs); - write_sequnlock(&xtime_lock); - } -#endif - - if (!(action->flags & SA_INTERRUPT)) - local_irq_enable(); - - status = 0; - do { - ret = action->handler(irq, action->dev_id, regs); - if (ret == IRQ_HANDLED) - status |= action->flags; - retval |= ret; - action = action->next; - } while (action); - - if (status & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - - spin_lock_irq(&irq_controller_lock); - - return retval; -} - -/* - * This is for software-decoded IRQs. The caller is expected to - * handle the ack, clear, mask and unmask issues. - */ -void -do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) -{ - struct irqaction *action; - const unsigned int cpu = smp_processor_id(); - - desc->triggered = 1; - - kstat_cpu(cpu).irqs[irq]++; - - smp_set_running(desc); - - action = desc->action; - if (action) { - int ret = __do_irq(irq, action, regs); - if (ret != IRQ_HANDLED) - report_bad_irq(irq, regs, desc, ret); - } - - smp_clear_running(desc); -} - -/* - * Most edge-triggered IRQ implementations seem to take a broken - * approach to this. Hence the complexity. - */ -void -do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) -{ - const unsigned int cpu = smp_processor_id(); - - desc->triggered = 1; - - /* - * If we're currently running this IRQ, or its disabled, - * we shouldn't process the IRQ. Instead, turn on the - * hardware masks. - */ - if (unlikely(desc->running || desc->disable_depth)) - goto running; - - /* - * Acknowledge and clear the IRQ, but don't mask it. 
- */ - desc->chip->ack(irq); - - /* - * Mark the IRQ currently in progress. - */ - desc->running = 1; - - kstat_cpu(cpu).irqs[irq]++; - - do { - struct irqaction *action; - - action = desc->action; - if (!action) - break; - - if (desc->pending && !desc->disable_depth) { - desc->pending = 0; - desc->chip->unmask(irq); - } - - __do_irq(irq, action, regs); - } while (desc->pending && !desc->disable_depth); - - desc->running = 0; - - /* - * If we were disabled or freed, shut down the handler. - */ - if (likely(desc->action && !check_irq_lock(desc, irq, regs))) - return; - - running: - /* - * We got another IRQ while this one was masked or - * currently running. Delay it. - */ - desc->pending = 1; - desc->chip->mask(irq); - desc->chip->ack(irq); -} - -/* - * Level-based IRQ handler. Nice and simple. - */ -void -do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) -{ - struct irqaction *action; - const unsigned int cpu = smp_processor_id(); - - desc->triggered = 1; - - /* - * Acknowledge, clear _AND_ disable the interrupt. - */ - desc->chip->ack(irq); - - if (likely(!desc->disable_depth)) { - kstat_cpu(cpu).irqs[irq]++; - - smp_set_running(desc); - - /* - * Return with this interrupt masked if no action - */ - action = desc->action; - if (action) { - int ret = __do_irq(irq, desc->action, regs); - - if (ret != IRQ_HANDLED) - report_bad_irq(irq, regs, desc, ret); - - if (likely(!desc->disable_depth && - !check_irq_lock(desc, irq, regs))) - desc->chip->unmask(irq); - } - - smp_clear_running(desc); - } -} - -static void do_pending_irqs(struct pt_regs *regs) -{ - struct list_head head, *l, *n; - - do { - struct irqdesc *desc; - - /* - * First, take the pending interrupts off the list. - * The act of calling the handlers may add some IRQs - * back onto the list. - */ - head = irq_pending; - INIT_LIST_HEAD(&irq_pending); - head.next->prev = &head; - head.prev->next = &head; - - /* - * Now run each entry. We must delete it from our - * list before calling the handler. - */ - list_for_each_safe(l, n, &head) { - desc = list_entry(l, struct irqdesc, pend); - list_del_init(&desc->pend); - desc_handle_irq(desc - irq_desc, desc, regs); - } - - /* - * The list must be empty. - */ - BUG_ON(!list_empty(&head)); - } while (!list_empty(&irq_pending)); -} +/* Handle bad interrupts */ +static struct irq_desc bad_irq_desc = { + .handle_irq = handle_bad_irq, + .lock = SPIN_LOCK_UNLOCKED +}; /* * do_IRQ handles all hardware IRQ's. Decoded IRQs should not @@ -550,96 +120,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) desc = &bad_irq_desc; irq_enter(); - spin_lock(&irq_controller_lock); - desc_handle_irq(irq, desc, regs); - /* - * Now re-run any pending interrupts. 
- */ - if (!list_empty(&irq_pending)) - do_pending_irqs(regs); + desc_handle_irq(irq, desc, regs); + /* AT91 specific workaround */ irq_finish(irq); - spin_unlock(&irq_controller_lock); irq_exit(); } -void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained) -{ - struct irqdesc *desc; - unsigned long flags; - - if (irq >= NR_IRQS) { - printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq); - return; - } - - if (handle == NULL) - handle = do_bad_IRQ; - - desc = irq_desc + irq; - - if (is_chained && desc->chip == &bad_chip) - printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq); - - spin_lock_irqsave(&irq_controller_lock, flags); - if (handle == do_bad_IRQ) { - desc->chip->mask(irq); - desc->chip->ack(irq); - desc->disable_depth = 1; - } - desc->handle = handle; - if (handle != do_bad_IRQ && is_chained) { - desc->valid = 0; - desc->probe_ok = 0; - desc->disable_depth = 0; - desc->chip->unmask(irq); - } - spin_unlock_irqrestore(&irq_controller_lock, flags); -} - -void set_irq_chip(unsigned int irq, struct irqchip *chip) -{ - struct irqdesc *desc; - unsigned long flags; - - if (irq >= NR_IRQS) { - printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); - return; - } - - if (chip == NULL) - chip = &bad_chip; - - desc = irq_desc + irq; - spin_lock_irqsave(&irq_controller_lock, flags); - desc->chip = chip; - spin_unlock_irqrestore(&irq_controller_lock, flags); -} - -int set_irq_type(unsigned int irq, unsigned int type) -{ - struct irqdesc *desc; - unsigned long flags; - int ret = -ENXIO; - - if (irq >= NR_IRQS) { - printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); - return -ENODEV; - } - - desc = irq_desc + irq; - if (desc->chip->set_type) { - spin_lock_irqsave(&irq_controller_lock, flags); - ret = desc->chip->set_type(irq, type); - spin_unlock_irqrestore(&irq_controller_lock, flags); - } - - return ret; -} -EXPORT_SYMBOL(set_irq_type); - void set_irq_flags(unsigned int irq, unsigned int iflags) { struct irqdesc *desc; @@ -651,421 +140,32 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) } desc = irq_desc + irq; - spin_lock_irqsave(&irq_controller_lock, flags); - desc->valid = (iflags & IRQF_VALID) != 0; - desc->probe_ok = (iflags & IRQF_PROBE) != 0; - desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0; - spin_unlock_irqrestore(&irq_controller_lock, flags); -} - -int setup_irq(unsigned int irq, struct irqaction *new) -{ - int shared = 0; - struct irqaction *old, **p; - unsigned long flags; - struct irqdesc *desc; - - /* - * Some drivers like serial.c use request_irq() heavily, - * so we have to be careful not to interfere with a - * running system. - */ - if (new->flags & SA_SAMPLE_RANDOM) { - /* - * This function might sleep, we want to call it first, - * outside of the atomic block. - * Yes, this might clear the entropy pool if the wrong - * driver is attempted to be loaded, without actually - * installing a new handler, but is this really a problem, - * only the sysadmin is able to do this. - */ - rand_initialize_irq(irq); - } - - /* - * The following block of code has to be executed atomically - */ - desc = irq_desc + irq; - spin_lock_irqsave(&irq_controller_lock, flags); - p = &desc->action; - if ((old = *p) != NULL) { - /* - * Can't share interrupts unless both agree to and are - * the same type. 
- */ - if (!(old->flags & new->flags & SA_SHIRQ) || - (~old->flags & new->flags) & SA_TRIGGER_MASK) { - spin_unlock_irqrestore(&irq_controller_lock, flags); - return -EBUSY; - } - - /* add new interrupt at end of irq queue */ - do { - p = &old->next; - old = *p; - } while (old); - shared = 1; - } - - *p = new; - - if (!shared) { - desc->probing = 0; - desc->running = 0; - desc->pending = 0; - desc->disable_depth = 1; - - if (new->flags & SA_TRIGGER_MASK && - desc->chip->set_type) { - unsigned int type = new->flags & SA_TRIGGER_MASK; - desc->chip->set_type(irq, type); - } - - if (!desc->noautoenable) { - desc->disable_depth = 0; - desc->chip->unmask(irq); - } - } - - spin_unlock_irqrestore(&irq_controller_lock, flags); - return 0; -} - -/** - * request_irq - allocate an interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs - * @irqflags: Interrupt type flags - * @devname: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. From the point this - * call is made your handler function may be invoked. Since - * your handler function must clear any interrupt the board - * raises, you must take care both to initialise your hardware - * and to set up the interrupt handler in the right order. - * - * Dev_id must be globally unique. Normally the address of the - * device data structure is used as the cookie. Since the handler - * receives this value it makes sense to use it. - * - * If your interrupt is shared you must pass a non NULL dev_id - * as this is required when freeing the interrupt. - * - * Flags: - * - * SA_SHIRQ Interrupt is shared - * - * SA_INTERRUPT Disable local interrupts while processing - * - * SA_SAMPLE_RANDOM The interrupt can be used for entropy - * - */ -int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), - unsigned long irq_flags, const char * devname, void *dev_id) -{ - unsigned long retval; - struct irqaction *action; - - if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler || - (irq_flags & SA_SHIRQ && !dev_id)) - return -EINVAL; - - action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); - if (!action) - return -ENOMEM; - - action->handler = handler; - action->flags = irq_flags; - cpus_clear(action->mask); - action->name = devname; - action->next = NULL; - action->dev_id = dev_id; - - retval = setup_irq(irq, action); - - if (retval) - kfree(action); - return retval; -} - -EXPORT_SYMBOL(request_irq); - -/** - * free_irq - free an interrupt - * @irq: Interrupt line to free - * @dev_id: Device identity to free - * - * Remove an interrupt handler. The handler is removed and if the - * interrupt line is no longer in use by any driver it is disabled. - * On a shared IRQ the caller must ensure the interrupt is disabled - * on the card it drives before calling this function. - * - * This function must not be called from interrupt context. 
- */ -void free_irq(unsigned int irq, void *dev_id) -{ - struct irqaction * action, **p; - unsigned long flags; - - if (irq >= NR_IRQS || !irq_desc[irq].valid) { - printk(KERN_ERR "Trying to free IRQ%d\n",irq); - dump_stack(); - return; - } - - spin_lock_irqsave(&irq_controller_lock, flags); - for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) { - if (action->dev_id != dev_id) - continue; - - /* Found it - now free it */ - *p = action->next; - break; - } - spin_unlock_irqrestore(&irq_controller_lock, flags); - - if (!action) { - printk(KERN_ERR "Trying to free free IRQ%d\n",irq); - dump_stack(); - } else { - synchronize_irq(irq); - kfree(action); - } -} - -EXPORT_SYMBOL(free_irq); - -static DECLARE_MUTEX(probe_sem); - -/* Start the interrupt probing. Unlike other architectures, - * we don't return a mask of interrupts from probe_irq_on, - * but return the number of interrupts enabled for the probe. - * The interrupts which have been enabled for probing is - * instead recorded in the irq_desc structure. - */ -unsigned long probe_irq_on(void) -{ - unsigned int i, irqs = 0; - unsigned long delay; - - down(&probe_sem); - - /* - * first snaffle up any unassigned but - * probe-able interrupts - */ - spin_lock_irq(&irq_controller_lock); - for (i = 0; i < NR_IRQS; i++) { - if (!irq_desc[i].probe_ok || irq_desc[i].action) - continue; - - irq_desc[i].probing = 1; - irq_desc[i].triggered = 0; - if (irq_desc[i].chip->set_type) - irq_desc[i].chip->set_type(i, IRQT_PROBE); - irq_desc[i].chip->unmask(i); - irqs += 1; - } - spin_unlock_irq(&irq_controller_lock); - - /* - * wait for spurious interrupts to mask themselves out again - */ - for (delay = jiffies + HZ/10; time_before(jiffies, delay); ) - /* min 100ms delay */; - - /* - * now filter out any obviously spurious interrupts - */ - spin_lock_irq(&irq_controller_lock); - for (i = 0; i < NR_IRQS; i++) { - if (irq_desc[i].probing && irq_desc[i].triggered) { - irq_desc[i].probing = 0; - irqs -= 1; - } - } - spin_unlock_irq(&irq_controller_lock); - - return irqs; -} - -EXPORT_SYMBOL(probe_irq_on); - -unsigned int probe_irq_mask(unsigned long irqs) -{ - unsigned int mask = 0, i; - - spin_lock_irq(&irq_controller_lock); - for (i = 0; i < 16 && i < NR_IRQS; i++) - if (irq_desc[i].probing && irq_desc[i].triggered) - mask |= 1 << i; - spin_unlock_irq(&irq_controller_lock); - - up(&probe_sem); - - return mask; -} -EXPORT_SYMBOL(probe_irq_mask); - -/* - * Possible return values: - * >= 0 - interrupt number - * -1 - no interrupt/many interrupts - */ -int probe_irq_off(unsigned long irqs) -{ - unsigned int i; - int irq_found = NO_IRQ; - - /* - * look at the interrupts, and find exactly one - * that we were probing has been triggered - */ - spin_lock_irq(&irq_controller_lock); - for (i = 0; i < NR_IRQS; i++) { - if (irq_desc[i].probing && - irq_desc[i].triggered) { - if (irq_found != NO_IRQ) { - irq_found = NO_IRQ; - goto out; - } - irq_found = i; - } - } - - if (irq_found == -1) - irq_found = NO_IRQ; -out: - spin_unlock_irq(&irq_controller_lock); - - up(&probe_sem); - - return irq_found; -} - -EXPORT_SYMBOL(probe_irq_off); - -#ifdef CONFIG_SMP -static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu) -{ - pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); - - spin_lock_irq(&irq_controller_lock); - desc->cpu = cpu; - desc->chip->set_cpu(desc, irq, cpu); - spin_unlock_irq(&irq_controller_lock); -} - -#ifdef CONFIG_PROC_FS -static int -irq_affinity_read_proc(char *page, char **start, off_t off, int 
count, - int *eof, void *data) -{ - struct irqdesc *desc = irq_desc + ((int)data); - int len = cpumask_scnprintf(page, count, desc->affinity); - - if (count - len < 2) - return -EINVAL; - page[len++] = '\n'; - page[len] = '\0'; - - return len; -} - -static int -irq_affinity_write_proc(struct file *file, const char __user *buffer, - unsigned long count, void *data) -{ - unsigned int irq = (unsigned int)data; - struct irqdesc *desc = irq_desc + irq; - cpumask_t affinity, tmp; - int ret = -EIO; - - if (!desc->chip->set_cpu) - goto out; - - ret = cpumask_parse(buffer, count, affinity); - if (ret) - goto out; - - cpus_and(tmp, affinity, cpu_online_map); - if (cpus_empty(tmp)) { - ret = -EINVAL; - goto out; - } - - desc->affinity = affinity; - route_irq(desc, irq, first_cpu(tmp)); - ret = count; - - out: - return ret; -} -#endif -#endif - -void __init init_irq_proc(void) -{ -#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS) - struct proc_dir_entry *dir; - int irq; - - dir = proc_mkdir("irq", NULL); - if (!dir) - return; - - for (irq = 0; irq < NR_IRQS; irq++) { - struct proc_dir_entry *entry; - struct irqdesc *desc; - char name[16]; - - desc = irq_desc + irq; - memset(name, 0, sizeof(name)); - snprintf(name, sizeof(name) - 1, "%u", irq); - - desc->procdir = proc_mkdir(name, dir); - if (!desc->procdir) - continue; - - entry = create_proc_entry("smp_affinity", 0600, desc->procdir); - if (entry) { - entry->nlink = 1; - entry->data = (void *)irq; - entry->read_proc = irq_affinity_read_proc; - entry->write_proc = irq_affinity_write_proc; - } - } -#endif + spin_lock_irqsave(&desc->lock, flags); + desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + if (iflags & IRQF_VALID) + desc->status &= ~IRQ_NOREQUEST; + if (iflags & IRQF_PROBE) + desc->status &= ~IRQ_NOPROBE; + if (!(iflags & IRQF_NOAUTOEN)) + desc->status &= ~IRQ_NOAUTOEN; + spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) { - struct irqdesc *desc; int irq; + for (irq = 0; irq < NR_IRQS; irq++) + irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE | + IRQ_NOPROBE; + #ifdef CONFIG_SMP bad_irq_desc.affinity = CPU_MASK_ALL; bad_irq_desc.cpu = smp_processor_id(); #endif - - for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) { - *desc = bad_irq_desc; - INIT_LIST_HEAD(&desc->pend); - } - init_arch_irq(); } -static int __init noirqdebug_setup(char *str) -{ - noirqdebug = 1; - return 1; -} - -__setup("noirqdebug", noirqdebug_setup); - #ifdef CONFIG_HOTPLUG_CPU /* * The CPU has been marked offline. Migrate IRQs off this CPU. If diff --git a/arch/arm/kernel/iwmmxt-notifier.c b/arch/arm/kernel/iwmmxt-notifier.c new file mode 100644 index 00000000000..44a86c33796 --- /dev/null +++ b/arch/arm/kernel/iwmmxt-notifier.c @@ -0,0 +1,64 @@ +/* + * linux/arch/arm/kernel/iwmmxt-notifier.c + * + * XScale iWMMXt (Concan) context switching and handling + * + * Initial code: + * Copyright (c) 2003, Intel Corporation + * + * Full lazy switching support, optimizations and more, by Nicolas Pitre + * Copyright (c) 2003-2004, MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <asm/thread_notify.h> +#include <asm/io.h> + +static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) +{ + struct thread_info *thread = t; + + switch (cmd) { + case THREAD_NOTIFY_FLUSH: + /* + * flush_thread() zeroes thread->fpstate, so no need + * to do anything here. + * + * FALLTHROUGH: Ensure we don't try to overwrite our newly + * initialised state information on the first fault. + */ + + case THREAD_NOTIFY_RELEASE: + iwmmxt_task_release(thread); + break; + + case THREAD_NOTIFY_SWITCH: + iwmmxt_task_switch(thread); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block iwmmxt_notifier_block = { + .notifier_call = iwmmxt_do, +}; + +static int __init iwmmxt_init(void) +{ + thread_register_notifier(&iwmmxt_notifier_block); + + return 0; +} + +late_initcall(iwmmxt_init); diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 24c7b0477a0..b63b528f22a 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -271,30 +271,27 @@ ENTRY(iwmmxt_task_restore) /* * Concan handling on task switch * - * r0 = previous task_struct pointer (must be preserved) - * r1 = previous thread_info pointer - * r2 = next thread_info.cpu_domain pointer (must be preserved) + * r0 = next thread_info pointer * - * Called only from __switch_to with task preemption disabled. - * No need to care about preserving r4 and above. + * Called only from the iwmmxt notifier with task preemption disabled. */ ENTRY(iwmmxt_task_switch) - mrc p15, 0, r4, c15, c1, 0 - tst r4, #0x3 @ CP0 and CP1 accessible? + mrc p15, 0, r1, c15, c1, 0 + tst r1, #0x3 @ CP0 and CP1 accessible? bne 1f @ yes: block them for next task - ldr r5, =concan_owner - add r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area - ldr r5, [r5] @ get current Concan owner - teq r5, r6 @ next task owns it? + ldr r2, =concan_owner + add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area + ldr r2, [r2] @ get current Concan owner + teq r2, r3 @ next task owns it? movne pc, lr @ no: leave Concan disabled -1: eor r4, r4, #3 @ flip Concan access - mcr p15, 0, r4, c15, c1, 0 +1: eor r1, r1, #3 @ flip Concan access + mcr p15, 0, r1, c15, c1, 0 - mrc p15, 0, r4, c2, c0, 0 - sub pc, lr, r4, lsr #32 @ cpwait and return + mrc p15, 0, r1, c2, c0, 0 + sub pc, lr, r1, lsr #32 @ cpwait and return /* * Remove Concan ownership of given task diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 055bf5d2889..298363d9704 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -9,7 +9,6 @@ * * Module allocation method suggested by Andi Kleen. 
*/ -#include <linux/config.h> #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/kernel.h> diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 7df6e1aaa32..3079535afcc 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -10,7 +10,6 @@ */ #include <stdarg.h> -#include <linux/config.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -28,10 +27,12 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/elfcore.h> +#include <linux/pm.h> #include <asm/leds.h> #include <asm/processor.h> #include <asm/system.h> +#include <asm/thread_notify.h> #include <asm/uaccess.h> #include <asm/mach/time.h> @@ -71,8 +72,36 @@ static int __init hlt_setup(char *__unused) __setup("nohlt", nohlt_setup); __setup("hlt", hlt_setup); +void arm_machine_restart(char mode) +{ + /* + * Clean and disable cache, and turn off interrupts + */ + cpu_proc_fin(); + + /* + * Tell the mm system that we are going to reboot - + * we may need it to insert some 1:1 mappings so that + * soft boot works. + */ + setup_mm_for_reboot(mode); + + /* + * Now call the architecture specific reboot code. + */ + arch_reset(mode); + + /* + * Whoops - the architecture was unable to reboot. + * Tell the user! + */ + mdelay(1000); + printk("Reboot failed -- System halted\n"); + while (1); +} + /* - * The following aren't currently used. + * Function pointers to optional machine specific functions */ void (*pm_idle)(void); EXPORT_SYMBOL(pm_idle); @@ -80,6 +109,10 @@ EXPORT_SYMBOL(pm_idle); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); +void (*arm_pm_restart)(char str) = arm_machine_restart; +EXPORT_SYMBOL_GPL(arm_pm_restart); + + /* * This is our default idle handler. We need to disable * interrupts here to ensure we don't miss a wakeup call. @@ -151,33 +184,9 @@ void machine_power_off(void) pm_power_off(); } - void machine_restart(char * __unused) { - /* - * Clean and disable cache, and turn off interrupts - */ - cpu_proc_fin(); - - /* - * Tell the mm system that we are going to reboot - - * we may need it to insert some 1:1 mappings so that - * soft boot works. - */ - setup_mm_for_reboot(reboot_mode); - - /* - * Now call the architecture specific reboot code. - */ - arch_reset(reboot_mode); - - /* - * Whoops - the architecture was unable to reboot. - * Tell the user! 
- */ - mdelay(1000); - printk("Reboot failed -- System halted\n"); - while (1); + arm_pm_restart(reboot_mode); } void __show_regs(struct pt_regs *regs) @@ -329,13 +338,9 @@ void exit_thread(void) { } -static void default_fp_init(union fp_state *fp) -{ - memset(fp, 0, sizeof(union fp_state)); -} +ATOMIC_NOTIFIER_HEAD(thread_notify_head); -void (*fp_init)(union fp_state *) = default_fp_init; -EXPORT_SYMBOL(fp_init); +EXPORT_SYMBOL_GPL(thread_notify_head); void flush_thread(void) { @@ -344,23 +349,16 @@ void flush_thread(void) memset(thread->used_cp, 0, sizeof(thread->used_cp)); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); -#if defined(CONFIG_IWMMXT) - iwmmxt_task_release(thread); -#endif - fp_init(&thread->fpstate); -#if defined(CONFIG_VFP) - vfp_flush_thread(&thread->vfpstate); -#endif + memset(&thread->fpstate, 0, sizeof(union fp_state)); + + thread_notify(THREAD_NOTIFY_FLUSH, thread); } void release_thread(struct task_struct *dead_task) { -#if defined(CONFIG_VFP) - vfp_release_thread(&task_thread_info(dead_task)->vfpstate); -#endif -#if defined(CONFIG_IWMMXT) - iwmmxt_task_release(task_thread_info(dead_task)); -#endif + struct thread_info *thread = task_thread_info(dead_task); + + thread_notify(THREAD_NOTIFY_RELEASE, thread); } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index a1d1b2906e8..9254ba2f46f 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -9,7 +9,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/config.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> @@ -634,6 +633,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) #endif +#ifdef CONFIG_CRUNCH +/* + * Get the child Crunch state. + */ +static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) +{ + struct thread_info *thread = task_thread_info(tsk); + + crunch_task_disable(thread); /* force it to ram */ + return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) + ? -EFAULT : 0; +} + +/* + * Set the child Crunch state. + */ +static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) +{ + struct thread_info *thread = task_thread_info(tsk); + + crunch_task_release(thread); /* force a reload */ + return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) + ? -EFAULT : 0; +} +#endif + long arch_ptrace(struct task_struct *child, long request, long addr, long data) { unsigned long tmp; @@ -765,6 +790,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) child->ptrace_message = data; break; +#ifdef CONFIG_CRUNCH + case PTRACE_GETCRUNCHREGS: + ret = ptrace_getcrunchregs(child, (void __user *)data); + break; + + case PTRACE_SETCRUNCHREGS: + ret = ptrace_setcrunchregs(child, (void __user *)data); + break; +#endif + default: ret = ptrace_request(child, request, addr, data); break; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 9fc9af88c60..7d6a516c0b9 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -7,7 +7,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index a1d1b2906e8..9254ba2f46f 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -9,7 +9,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -634,6 +633,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
 
 #endif
 
+#ifdef CONFIG_CRUNCH
+/*
+ * Get the child Crunch state.
+ */
+static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_disable(thread);  /* force it to ram */
+	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+
+/*
+ * Set the child Crunch state.
+ */
+static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_release(thread);  /* force a reload */
+	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+#endif
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	unsigned long tmp;
@@ -765,6 +790,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			child->ptrace_message = data;
 		break;
 
+#ifdef CONFIG_CRUNCH
+	case PTRACE_GETCRUNCHREGS:
+		ret = ptrace_getcrunchregs(child, (void __user *)data);
+		break;
+
+	case PTRACE_SETCRUNCHREGS:
+		ret = ptrace_setcrunchregs(child, (void __user *)data);
+		break;
+#endif
+
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
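From userspace, a debugger reaches the new requests like any other ptrace register class. A hedged sketch, assuming PTRACE_GETCRUNCHREGS and the Crunch state size come from this kernel's <asm/ptrace.h>; the fallback values below are placeholders for illustration only:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETCRUNCHREGS
    #define PTRACE_GETCRUNCHREGS	25	/* placeholder: take the real value from <asm/ptrace.h> */
    #endif
    #define CRUNCH_BUF_SIZE		256	/* placeholder: must be >= the kernel's CRUNCH_SIZE */

    int dump_crunch(pid_t pid)
    {
    	unsigned char buf[CRUNCH_BUF_SIZE];

    	/* addr is unused; data points at the buffer the kernel fills */
    	if (ptrace(PTRACE_GETCRUNCHREGS, pid, 0, buf) == -1) {
    		perror("PTRACE_GETCRUNCHREGS");
    		return -1;
    	}
    	return 0;
    }
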
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c60..7d6a516c0b9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -7,7 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
@@ -119,9 +118,24 @@ DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
  * Standard memory resources
  */
 static struct resource mem_res[] = {
-	{ "Video RAM",   0, 0, IORESOURCE_MEM },
-	{ "Kernel text", 0, 0, IORESOURCE_MEM },
-	{ "Kernel data", 0, 0, IORESOURCE_MEM }
+	{
+		.name = "Video RAM",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	},
+	{
+		.name = "Kernel text",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	},
+	{
+		.name = "Kernel data",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	}
 };
 
 #define video_ram   mem_res[0]
@@ -129,9 +143,24 @@ static struct resource mem_res[] = {
 #define kernel_data mem_res[2]
 
 static struct resource io_res[] = {
-	{ "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
-	{ "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
-	{ "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
+	{
+		.name = "reserved",
+		.start = 0x3bc,
+		.end = 0x3be,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	},
+	{
+		.name = "reserved",
+		.start = 0x378,
+		.end = 0x37f,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	},
+	{
+		.name = "reserved",
+		.start = 0x278,
+		.end = 0x27f,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	}
 };
 
 #define lp0 io_res[0]
@@ -315,9 +344,9 @@ static void __init setup_processor(void)
 	cpu_cache = *list->cache;
 #endif
 
-	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
+	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08x\n",
 	       cpu_name, processor_id, (int)processor_id & 15,
-	       proc_arch[cpu_architecture()]);
+	       proc_arch[cpu_architecture()], cr_alignment);
 
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
@@ -808,7 +837,7 @@ static int __init topology_init(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
+		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a0cd0a90a10..48cf7fffddf 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -7,7 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/ptrace.h>
@@ -132,18 +131,38 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 	return ret;
 }
 
-#ifdef CONFIG_IWMMXT
+#ifdef CONFIG_CRUNCH
+static int preserve_crunch_context(struct crunch_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
 
-/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */
-#define IWMMXT_STORAGE_SIZE	(0x98 + 8)
-#define IWMMXT_MAGIC0		0x12ef842a
-#define IWMMXT_MAGIC1		0x1c07ca71
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic = CRUNCH_MAGIC;
+	kframe->size = CRUNCH_STORAGE_SIZE;
+	crunch_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
+}
 
-struct iwmmxt_sigframe {
-	unsigned long	magic0;
-	unsigned long	magic1;
-	unsigned long	storage[0x98/4];
-};
+static int restore_crunch_context(struct crunch_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
+
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic != CRUNCH_MAGIC ||
+	    kframe->size != CRUNCH_STORAGE_SIZE)
+		return -1;
+	crunch_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_IWMMXT
 
 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
@@ -152,8 +171,8 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 	/* the iWMMXt context must be 64 bit aligned */
 	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
-	kframe->magic0 = IWMMXT_MAGIC0;
-	kframe->magic1 = IWMMXT_MAGIC1;
+	kframe->magic = IWMMXT_MAGIC;
+	kframe->size = IWMMXT_STORAGE_SIZE;
 	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
 	return __copy_to_user(frame, kframe, sizeof(*frame));
 }
@@ -167,8 +186,8 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
 	if (__copy_from_user(kframe, frame, sizeof(*frame)))
 		return -1;
-	if (kframe->magic0 != IWMMXT_MAGIC0 ||
-	    kframe->magic1 != IWMMXT_MAGIC1)
+	if (kframe->magic != IWMMXT_MAGIC ||
+	    kframe->size != IWMMXT_STORAGE_SIZE)
 		return -1;
 	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
 	return 0;
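Both the Crunch and iWMMXt helpers rely on the same idiom to get a 64-bit-aligned scratch frame out of a stack buffer that is only guaranteed 4-byte alignment: over-allocate by 8 bytes, step past them, and mask the low bits off. A self-contained sketch of just that idiom (the frame struct is a stand-in for crunch_sigframe/iwmmxt_sigframe):

    #include <stdio.h>

    struct frame {
    	unsigned long magic;
    	unsigned long size;
    	unsigned long storage[4];
    };

    int main(void)
    {
    	char kbuf[sizeof(struct frame) + 8];
    	struct frame *kframe;

    	/*
    	 * kbuf + 8 rounded down to a multiple of 8 always lands between
    	 * kbuf + 1 and kbuf + 8, so the aligned frame still fits inside
    	 * the over-sized buffer.
    	 */
    	kframe = (struct frame *)((unsigned long)(kbuf + 8) & ~7);
    	printf("buffer %p, aligned frame %p\n", (void *)kbuf, (void *)kframe);
    	return 0;
    }
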
@@ -177,70 +196,65 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 #endif
 
 /*
- * Auxiliary signal frame.  This saves stuff like FP state.
- * The layout of this structure is not part of the user ABI.
- */
-struct aux_sigframe {
-#ifdef CONFIG_IWMMXT
-	struct iwmmxt_sigframe	iwmmxt;
-#endif
-#ifdef CONFIG_VFP
-	union vfp_state		vfp;
-#endif
-};
-
-/*
  * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
  */
 struct sigframe {
-	struct sigcontext sc;
-	unsigned long extramask[_NSIG_WORDS-1];
+	struct ucontext uc;
 	unsigned long retcode[2];
-	struct aux_sigframe aux __attribute__((aligned(8)));
 };
 
 struct rt_sigframe {
-	struct siginfo __user *pinfo;
-	void __user *puc;
 	struct siginfo info;
-	struct ucontext uc;
-	unsigned long retcode[2];
-	struct aux_sigframe aux __attribute__((aligned(8)));
+	struct sigframe sig;
 };
 
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
-		   struct aux_sigframe __user *aux)
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 {
-	int err = 0;
+	struct aux_sigframe __user *aux;
+	sigset_t set;
+	int err;
 
-	__get_user_error(regs->ARM_r0, &sc->arm_r0, err);
-	__get_user_error(regs->ARM_r1, &sc->arm_r1, err);
-	__get_user_error(regs->ARM_r2, &sc->arm_r2, err);
-	__get_user_error(regs->ARM_r3, &sc->arm_r3, err);
-	__get_user_error(regs->ARM_r4, &sc->arm_r4, err);
-	__get_user_error(regs->ARM_r5, &sc->arm_r5, err);
-	__get_user_error(regs->ARM_r6, &sc->arm_r6, err);
-	__get_user_error(regs->ARM_r7, &sc->arm_r7, err);
-	__get_user_error(regs->ARM_r8, &sc->arm_r8, err);
-	__get_user_error(regs->ARM_r9, &sc->arm_r9, err);
-	__get_user_error(regs->ARM_r10, &sc->arm_r10, err);
-	__get_user_error(regs->ARM_fp, &sc->arm_fp, err);
-	__get_user_error(regs->ARM_ip, &sc->arm_ip, err);
-	__get_user_error(regs->ARM_sp, &sc->arm_sp, err);
-	__get_user_error(regs->ARM_lr, &sc->arm_lr, err);
-	__get_user_error(regs->ARM_pc, &sc->arm_pc, err);
-	__get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
+	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+	if (err == 0) {
+		sigdelsetmask(&set, ~_BLOCKABLE);
+		spin_lock_irq(&current->sighand->siglock);
+		current->blocked = set;
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+
+	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
 
 	err |= !valid_user_regs(regs);
 
+	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= restore_crunch_context(&aux->crunch);
+#endif
 #ifdef CONFIG_IWMMXT
 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
 		err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
 //	if (err == 0)
-//		err |= vfp_restore_state(&aux->vfp);
+//		err |= vfp_restore_state(&sf->aux.vfp);
 #endif
 
 	return err;
 }
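With both frame layouts now built around struct ucontext, the register block a SA_SIGINFO handler sees through its third argument is the same uc_mcontext that restore_sigframe() reads back on return. A hedged userspace sketch under that assumption (arm_pc is the ARM-specific sigcontext field used above):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>

    static void handler(int sig, siginfo_t *info, void *ctx)
    {
    	ucontext_t *uc = ctx;

    	/* pc of the interrupted code, as written by setup_sigframe() */
    	printf("signal %d at pc=%#lx\n", sig,
    	       (unsigned long)uc->uc_mcontext.arm_pc);
    }

    int main(void)
    {
    	struct sigaction sa;

    	sa.sa_sigaction = handler;
    	sa.sa_flags = SA_SIGINFO;
    	sigemptyset(&sa.sa_mask);
    	return sigaction(SIGUSR1, &sa, NULL);
    }
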
@@ -249,7 +263,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 
 asmlinkage int
 sys_sigreturn(struct pt_regs *regs)
 {
 	struct sigframe __user *frame;
-	sigset_t set;
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -266,19 +279,8 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
 		goto badframe;
 
-	if (__get_user(set.sig[0], &frame->sc.oldmask)
-	    || (_NSIG_WORDS > 1
-	        && __copy_from_user(&set.sig[1], &frame->extramask,
-				    sizeof(frame->extramask))))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	if (restore_sigcontext(regs, &frame->sc, &frame->aux))
+	if (restore_sigframe(regs, frame))
 		goto badframe;
 
 	/* Send SIGTRAP if we're single-stepping */
@@ -297,7 +299,6 @@ badframe:
 asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
-	sigset_t set;
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -314,19 +315,11 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
 		goto badframe;
 
-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
+	if (restore_sigframe(regs, &frame->sig))
 		goto badframe;
 
-	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
+	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
 		goto badframe;
 
 	/* Send SIGTRAP if we're single-stepping */
@@ -343,42 +336,50 @@ badframe:
 }
 
 static int
-setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
-		 struct pt_regs *regs, unsigned long mask)
+setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
+	struct aux_sigframe __user *aux;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sc->arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sc->arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sc->arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sc->arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sc->arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sc->arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sc->arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sc->arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sc->arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sc->arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sc->arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sc->arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sc->arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sc->arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sc->arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sc->arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
-
-	__put_user_error(current->thread.trap_no, &sc->trap_no, err);
-	__put_user_error(current->thread.error_code, &sc->error_code, err);
-	__put_user_error(current->thread.address, &sc->fault_address, err);
-	__put_user_error(mask, &sc->oldmask, err);
-
+	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= preserve_crunch_context(&aux->crunch);
+#endif
 #ifdef CONFIG_IWMMXT
 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
 		err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
 //	if (err == 0)
-//		err |= vfp_save_state(&aux->vfp);
+//		err |= vfp_save_state(&sf->aux.vfp);
 #endif
+	__put_user_error(0, &aux->end_magic, err);
 
 	return err;
 }
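setup_sigframe() lays each coprocessor block into uc.uc_regspace as a (magic, size)-tagged record and terminates the area with a zero end_magic word, so a consumer can skip blocks it does not understand. A hedged sketch of that walk, assuming every block starts with the magic/size header shown in the crunch/iwmmxt frames above:

    struct reg_record {		/* assumed common header of each block */
    	unsigned long magic;
    	unsigned long size;	/* total bytes in this record */
    };

    /* find a block by magic, stopping at the zero end_magic terminator */
    static const struct reg_record *find_record(const void *regspace,
    					    unsigned long magic)
    {
    	const struct reg_record *rec = regspace;

    	while (rec->magic != 0) {
    		if (rec->magic == magic)
    			return rec;
    		rec = (const struct reg_record *)((const char *)rec + rec->size);
    	}
    	return NULL;
    }
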
@@ -487,13 +488,12 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg
 	if (!frame)
 		return 1;
 
-	err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);
-
-	if (_NSIG_WORDS > 1) {
-		err |= __copy_to_user(frame->extramask, &set->sig[1],
-				      sizeof(frame->extramask));
-	}
+	/*
+	 * Set uc.uc_flags to a value which sc.trap_no would never have.
+	 */
+	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err |= setup_sigframe(frame, regs, set);
 
 	if (err == 0)
 		err = setup_return(regs, ka, frame->retcode, frame, usig);
@@ -511,25 +511,20 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	if (!frame)
 		return 1;
 
-	__put_user_error(&frame->info, &frame->pinfo, err);
-	__put_user_error(&frame->uc, &frame->puc, err);
 	err |= copy_siginfo_to_user(&frame->info, info);
 
-	__put_user_error(0, &frame->uc.uc_flags, err);
-	__put_user_error(NULL, &frame->uc.uc_link, err);
+	__put_user_error(0, &frame->sig.uc.uc_flags, err);
+	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
 
 	memset(&stack, 0, sizeof(stack));
 	stack.ss_sp = (void __user *)current->sas_ss_sp;
 	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
 	stack.ss_size = current->sas_ss_size;
-	err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
-
-	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
-				regs, set->sig[0]);
-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
 
+	err |= setup_sigframe(&frame->sig, regs, set);
 	if (err == 0)
-		err = setup_return(regs, ka, frame->retcode, frame, usig);
+		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);
 
 	if (err == 0) {
 		/*
@@ -538,7 +533,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 		 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
 		 */
 		regs->ARM_r1 = (unsigned long)&frame->info;
-		regs->ARM_r2 = (unsigned long)&frame->uc;
+		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
 	}
 
 	return err;
@@ -665,17 +660,33 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 	if (syscall) {
 		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
 			if (thumb_mode(regs)) {
-				regs->ARM_r7 = __NR_restart_syscall;
+				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
 				regs->ARM_pc -= 2;
 			} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+				regs->ARM_r7 = __NR_restart_syscall;
+				regs->ARM_pc -= 4;
+#else
 				u32 __user *usp;
+				u32 swival = __NR_restart_syscall;
 
 				regs->ARM_sp -= 12;
 				usp = (u32 __user *)regs->ARM_sp;
 
+				/*
+				 * Either we support OABI only, or we have
+				 * EABI with the OABI compat layer enabled.
+				 * In the latter case we don't know if user
+				 * space is EABI or not, and if not we must
+				 * not clobber r7.  Always using the OABI
+				 * syscall solves that issue and works for
+				 * all those cases.
+				 */
+				swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
+
 				put_user(regs->ARM_pc, &usp[0]);
 				/* swi __NR_restart_syscall */
-				put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+				put_user(0xef000000 | swival, &usp[1]);
 				/* ldr	pc, [sp], #12 */
 				put_user(0xe49df00c, &usp[2]);
 
@@ -683,6 +694,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 					  (unsigned long)(usp + 3));
 
 				regs->ARM_pc = regs->ARM_sp + 4;
+#endif
 			}
 		}
 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
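The non-AEABI path above restarts a syscall by pushing a three-word thunk onto the user stack and pointing pc at its second word: a swi whose OABI encoding carries the syscall number in the instruction itself, followed by an ldr that resumes at the saved pc and pops all twelve bytes. A sketch restating the hunk in isolation (usp, saved_pc, and swival stand for the values computed in do_signal()):

    /* build the 3-word restart thunk at usp */
    static void build_restart_thunk(unsigned long *usp,
    				unsigned long saved_pc,
    				unsigned long swival)
    {
    	usp[0] = saved_pc;		/* resume address, loaded by usp[2] */
    	usp[1] = 0xef000000 | swival;	/* "swi #swival": OABI puts the
    					   syscall number in the opcode */
    	usp[2] = 0xe49df00c;		/* "ldr pc, [sp], #12": jump to
    					   usp[0], popping the thunk */
    }

Execution starts at usp[1] (regs->ARM_pc = sp + 4), so the swi runs first and the ldr then returns to the interrupted code.
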
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1370d726dc1..68e9634d260 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -7,7 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d6bd435a685..09a67d77185 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -16,7 +16,6 @@
 *  1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -379,7 +378,7 @@ static int timer_dyn_tick_enable(void)
 	int ret = -ENODEV;
 
 	if (dyn_tick) {
-		write_seqlock_irqsave(&xtime_lock, flags);
+		spin_lock_irqsave(&dyn_tick->lock, flags);
 		ret = 0;
 		if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
 			ret = dyn_tick->enable();
@@ -387,7 +386,7 @@ static int timer_dyn_tick_enable(void)
 			if (ret == 0)
 				dyn_tick->state |= DYN_TICK_ENABLED;
 		}
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
 	}
 
 	return ret;
@@ -400,7 +399,7 @@ static int timer_dyn_tick_disable(void)
 	int ret = -ENODEV;
 
 	if (dyn_tick) {
-		write_seqlock_irqsave(&xtime_lock, flags);
+		spin_lock_irqsave(&dyn_tick->lock, flags);
 		ret = 0;
 		if (dyn_tick->state & DYN_TICK_ENABLED) {
 			ret = dyn_tick->disable();
@@ -408,7 +407,7 @@ static int timer_dyn_tick_disable(void)
 			if (ret == 0)
 				dyn_tick->state &= ~DYN_TICK_ENABLED;
 		}
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
 	}
 
 	return ret;
@@ -422,15 +421,20 @@ static int timer_dyn_tick_disable(void)
 void timer_dyn_reprogram(void)
 {
 	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long next, seq;
+	unsigned long next, seq, flags;
 
-	if (dyn_tick && (dyn_tick->state & DYN_TICK_ENABLED)) {
+	if (!dyn_tick)
+		return;
+
+	spin_lock_irqsave(&dyn_tick->lock, flags);
+	if (dyn_tick->state & DYN_TICK_ENABLED) {
 		next = next_timer_interrupt();
 		do {
 			seq = read_seqbegin(&xtime_lock);
-			dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+			dyn_tick->reprogram(next - jiffies);
 		} while (read_seqretry(&xtime_lock, seq));
 	}
+	spin_unlock_irqrestore(&dyn_tick->lock, flags);
 }
 
 static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
@@ -499,5 +503,10 @@ void __init time_init(void)
 	if (system_timer->offset == NULL)
 		system_timer->offset = dummy_gettimeoffset;
 	system_timer->init();
+
+#ifdef CONFIG_NO_IDLE_HZ
+	if (system_timer->dyn_tick)
+		system_timer->dyn_tick->lock = SPIN_LOCK_UNLOCKED;
+#endif
 }
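The dynamic tick paths now serialize state changes on a spinlock private to struct dyn_tick_timer instead of taking the writer side of the global xtime_lock; xtime_lock survives only as the read seqlock around the reprogram step. A sketch of the resulting pattern, merging the enable and disable functions purely for illustration (same fields as in the hunks above assumed):

    static int dyn_tick_set_state(struct dyn_tick_timer *dyn_tick, int enable)
    {
    	unsigned long flags;
    	int ret = 0;

    	spin_lock_irqsave(&dyn_tick->lock, flags);
    	if (enable && !(dyn_tick->state & DYN_TICK_ENABLED)) {
    		ret = dyn_tick->enable();
    		if (ret == 0)
    			dyn_tick->state |= DYN_TICK_ENABLED;
    	} else if (!enable && (dyn_tick->state & DYN_TICK_ENABLED)) {
    		ret = dyn_tick->disable();
    		if (ret == 0)
    			dyn_tick->state &= ~DYN_TICK_ENABLED;
    	}
    	spin_unlock_irqrestore(&dyn_tick->lock, flags);

    	return ret;
    }
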
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 35230a06010..35a052fc177 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -12,7 +12,6 @@
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/spinlock.h>
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595..3ca574ee277 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -4,7 +4,6 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
-#include <linux/config.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 
@@ -80,6 +79,10 @@ SECTIONS
 		*(.exit.text)
 		*(.exit.data)
 		*(.exitcall.exit)
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
 	}
 
 	.text : {			/* Real text segment		*/
@@ -87,7 +90,9 @@ SECTIONS
 			*(.text)
 			SCHED_TEXT
 			LOCK_TEXT
+#ifdef CONFIG_MMU
 			*(.fixup)
+#endif
 			*(.gnu.warning)
 			*(.rodata)
 			*(.rodata.*)
@@ -142,7 +147,9 @@ SECTIONS
 		 */
 		. = ALIGN(32);
 		__start___ex_table = .;
+#ifdef CONFIG_MMU
 		*(__ex_table)
+#endif
 		__stop___ex_table = .;
 
 		/*