Diffstat (limited to 'arch/arm/kernel')
28 files changed, 603 insertions, 676 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 185ee822c93..74554f1742d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
+obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index e5e1e538767..acca35aebe2 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
	/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
@@ -170,3 +164,7 @@ EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_offset);
+#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da6617213..927522cfc12 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -114,6 +117,14 @@ int main(void)
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
+#ifdef MULTI_CPU
+  DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
+  DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
+  DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+  DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
+#endif
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index c6273a3bfc2..d86fcd44b22 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
		 * Assign resources.
		 */
		pci_bus_assign_resources(bus);
+
+		/*
+		 * Enable bridges
+		 */
+		pci_enable_bridges(bus);
	}
	/*
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index a0f07521ca8..d2d983be096 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
	.macro addruart, rp, rv
	.endm
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	.macro senduart, rd, rx
	mcr p14, 0, \rd, c0, c5, 0
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea36c9..e8d88567680 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
 */
#include <asm/memory.h>
-#include <asm/glue.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index ae946490016..051166c2a93 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,13 @@
#ifndef CONFIG_THUMB2_KERNEL
	.macro svc_exit, rpsr
	msr spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex				@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
	ldr	r0, [sp]
	strex	r1, r2, [sp]		@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^		@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex				@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
#endif
@@ -92,10 +92,10 @@
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
#endif
	.if \fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8f57515bbdb..c84b57d27d0 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -25,83 +25,6 @@
 * machine ID for example).
 */
	__HEAD
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-	mov	r4, r1				@ preserve machine ID
-	adr	r0, str_a1
-	bl	printascii
-	mov	r0, r4
-	bl	printhex8
-	adr	r0, str_a2
-	bl	printascii
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}		@ get machine desc list
-	sub	r4, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r4			@ convert virt addresses to
-	add	r6, r6, r4			@ physical address space
-1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-	bl	printhex8
-	mov	r0, #'\t'
-	bl	printch
-	ldr	r0, [r5, #MACHINFO_NAME]	@ get machine name
-	add	r0, r0, r4
-	bl	printascii
-	mov	r0, #'\n'
-	bl	printch
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	adr	r0, str_a3
-	bl	printascii
-	b	__error
-ENDPROC(__error_a)
-
-str_a1:	.asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:	.asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:	.asciz "\nPlease check your kernel config and/or bootloader.\n"
-	.align
-#else
-	b	__error
-#endif
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space). We have to calculate the offset.
- *
- * r1 = machine architecture number
- * Returns:
- *  r3, r4, r6 corrupted
- *  r5 = mach_info pointer in physical address space
- */
-__lookup_machine_type:
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}
-	sub	r3, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-	teq	r3, r1				@ matches loader number?
-	beq	2f				@ found
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown machine
-2:	mov	pc, lr
-ENDPROC(__lookup_machine_type)
-
-/*
- * Look in arch/arm/kernel/arch.[ch] for information about the
- * __arch_info structures.
- */
-	.align	2
-	.type	__lookup_machine_type_data, %object
-__lookup_machine_type_data:
-	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-	.size	__lookup_machine_type_data, . - __lookup_machine_type_data
/* Determine validity of the r2 atags pointer.  The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
@@ -109,8 +32,6 @@ __lookup_machine_type_data:
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
- * r8  = machinfo
- *
 * Returns:
 *  r2 either valid atags pointer, or zero
 *  r5, r6 corrupted
@@ -185,17 +106,6 @@ __mmap_switched_data:
	.size	__mmap_switched_data, . - __mmap_switched_data
/*
- * This provides a C-API version of __lookup_machine_type
- */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
-
-/*
 * This provides a C-API version of __lookup_processor_type
 */
ENTRY(lookup_processor_type)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 814ce1a7327..6b1e0ad9ec3 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -44,9 +44,6 @@ ENTRY(stext)
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
-	beq	__error_a			@ yes, error 'a'
	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb2..c9173cfbbc7 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -26,14 +26,6 @@
#include <mach/debug-macro.S>
#endif
-#if (PHYS_OFFSET & 0x001fffff)
-#error "PHYS_OFFSET must be at an even 2MiB boundary!"
-#endif
-
-#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
-#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)
-
-
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
@@ -41,6 +33,7 @@
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
+#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@@ -48,8 +41,8 @@
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
-	.macro	pgtbl, rd
-	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
+	.macro	pgtbl, rd, phys
+	add	\rd, \phys, #TEXT_OFFSET - 0x4000
	.endm
#ifdef CONFIG_XIP_KERNEL
@@ -87,25 +80,33 @@ ENTRY(stext)
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
- THUMB( it	eq )		@ force fixup-able long branch encoding
-	beq	__error_a			@ yes, error 'a'
+
+#ifndef CONFIG_XIP_KERNEL
+	adr	r3, 2f
+	ldmia	r3, {r4, r8}
+	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
+	add	r8, r8, r4			@ PHYS_OFFSET
+#else
+	ldr	r8, =PLAT_PHYS_OFFSET
+#endif
	/*
	 * r1 = machine no, r2 = atags,
-	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
+	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	bl	__fixup_pv_table
+#endif
	bl	__create_page_tables
	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
-	 * xxx_proc_info structure selected by __lookup_machine_type
+	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
@@ -118,22 +119,24 @@ ENTRY(stext)
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
+#ifndef CONFIG_XIP_KERNEL
+2:	.long	.
+	.long	PAGE_OFFSET
+#endif
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
- * r8  = machinfo
- * r9  = cpuid
- * r10 = procinfo
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
-	pgtbl	r4				@ page table address
+	pgtbl	r4, r8				@ page table address
	/*
	 * Clear the 16K level 1 swapper page table
@@ -189,10 +192,8 @@ __create_page_tables:
	/*
	 * Map some ram to cover our .data and .bss areas.
	 */
-	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
-	.if	(KERNEL_RAM_PADDR & 0x00f00000)
-	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
-	.endif
+	add	r3, r8, #TEXT_OFFSET
+	orr	r3, r3, r7
	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
@@ -205,14 +206,17 @@
#endif
	/*
-	 * Then map first 1MB of ram in case it contains our boot params.
+	 * Then map boot params address in r2 or
+	 * the first 1MB of ram if boot params address is not specified.
	 */
-	add	r0, r4, #PAGE_OFFSET >> 18
-	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
-	.if	(PHYS_OFFSET & 0x00f00000)
-	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
-	.endif
-	str	r6, [r0]
+	mov	r0, r2, lsr #20
+	movs	r0, r0, lsl #20
+	moveq	r0, r8
+	sub	r3, r0, r8
+	add	r3, r3, #PAGE_OFFSET
+	add	r3, r4, r3, lsr #18
+	orr	r6, r7, r0
+	str	r6, [r3]
#ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC
@@ -391,6 +395,7 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
+	__INIT
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +420,7 @@ __fixup_smp_on_up:
	sub	r3, r0, r3
	add	r4, r4, r3
	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)
	.align
@@ -440,7 +434,156 @@ smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
+#endif
+
	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/* __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
+ * can be expressed by an immediate shifter operand. The stub instruction
+ * has a form of '(add|sub) rd, rn, #imm'.
+ */
+	__HEAD
+__fixup_pv_table:
+	adr	r0, 1f
+	ldmia	r0, {r3-r5, r7}
+	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	add	r4, r4, r3	@ adjust table start address
+	add	r5, r5, r3	@ adjust table end address
+	add	r7, r7, r3	@ adjust __pv_phys_offset address
+	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	mov	r6, r3, lsr #24	@ constant for add/sub instructions
+	teq	r3, r6, lsl #24 @ must be 16MiB aligned
+#else
+	mov	r6, r3, lsr #16	@ constant for add/sub instructions
+	teq	r3, r6, lsl #16	@ must be 64kiB aligned
+#endif
+THUMB(	it	ne		@ cross section branch )
+	bne	__error
+	str	r6, [r7, #4]	@ save to __pv_offset
+	b	__fixup_a_pv_table
+ENDPROC(__fixup_pv_table)
+
+	.align
+1:	.long	.
+	.long	__pv_table_begin
+	.long	__pv_table_end
+2:	.long	__pv_phys_offset
+
+	.text
+__fixup_a_pv_table:
+#ifdef CONFIG_THUMB2_KERNEL
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	lsls	r0, r6, #24
+	lsr	r6, #8
+	beq	1f
+	clz	r7, r0
+	lsr	r0, #24
+	lsl	r0, r7
+	bic	r0, 0x0080
+	lsrs	r7, #1
+	orrcs	r0, #0x0080
+	orr	r0, r0, r7, lsl #12
+#endif
+1:	lsls	r6, #24
+	beq	4f
+	clz	r7, r6
+	lsr	r6, #24
+	lsl	r6, r7
+	bic	r6, #0x0080
+	lsrs	r7, #1
+	orrcs	r6, #0x0080
+	orr	r6, r6, r7, lsl #12
+	orr	r6, #0x4000
+	b	4f
+2:	@ at this point the C flag is always clear
+	add	r7, r3
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	ldrh	ip, [r7]
+	tst	ip, 0x0400	@ the i bit tells us LS or MS byte
+	beq	3f
+	cmp	r0, #0		@ set C flag, and ...
+	biceq	ip, 0x0400	@ immediate zero value has a special encoding
+	streqh	ip, [r7]	@ that requires the i bit cleared
+#endif
+3:	ldrh	ip, [r7, #2]
+	and	ip, 0x8f00
+	orrcc	ip, r6	@ mask in offset bits 31-24
+	orrcs	ip, r0	@ mask in offset bits 23-16
+	strh	ip, [r7, #2]
+4:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	2b
+	bx	lr
+#else
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	and	r0, r6, #255	@ offset bits 23-16
+	mov	r6, r6, lsr #8	@ offset bits 31-24
+#else
+	mov	r0, #0		@ just in case...
+#endif
+	b	3f
+2:	ldr	ip, [r7, r3]
+	bic	ip, ip, #0x000000ff
+	tst	ip, #0x400	@ rotate shift tells us LS or MS byte
+	orrne	ip, ip, r6	@ mask in offset bits 31-24
+	orreq	ip, ip, r0	@ mask in offset bits 23-16
+	str	ip, [r7, r3]
+3:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	2b
+	mov	pc, lr
+#endif
+ENDPROC(__fixup_a_pv_table)
+
+ENTRY(fixup_pv_table)
+	stmfd	sp!, {r4 - r7, lr}
+	ldr	r2, 2f			@ get address of __pv_phys_offset
+	mov	r3, #0			@ no offset
+	mov	r4, r0			@ r0 = table start
+	add	r5, r0, r1		@ r1 = table size
+	ldr	r6, [r2, #4]		@ get __pv_offset
+	bl	__fixup_a_pv_table
+	ldmfd	sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+	.align
+2:	.long	__pv_phys_offset
+
+	.data
+	.globl	__pv_phys_offset
+	.type	__pv_phys_offset, %object
+__pv_phys_offset:
+	.long	0
+	.size	__pv_phys_offset, . - __pv_phys_offset
+__pv_offset:
+	.long	0
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f046757..44b84fe6e1b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
	u32 didr;
	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+	    "CPUID feature registers not supported. "
+	    "Assuming v6 debug is present.\n"))
		return ARM_DEBUG_ARCH_V6;
-	}
	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
	return debug_arch;
}
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
@@ -268,6 +273,9 @@ out:
int hw_breakpoint_slots(int type)
{
+	if (!debug_arch_supported())
+		return 0;
+
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
@@ -828,20 +836,33 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
 * One-time initialisation.
 */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
{
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
	/*
	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
	 */
	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
		 * Unconditionally clear the lock by writing a value
		 * other than 0xC5ACCE55 to the access register.
		 */
@@ -879,10 +900,11 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
	debug_arch = get_debug_arch();
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}
@@ -899,18 +921,24 @@ static int __init arch_hw_breakpoint_init(void)
		pr_info("%d breakpoint(s) reserved for watchpoint "
				"single-step.\n", core_num_reserved_brps);
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
		pr_warning("halting debug mode enabled. Assuming maximum "
-				"watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 28536e352de..3535d3793e6 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
#ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
{
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
+
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
}
/*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();
	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
	for_each_irq_desc(i, desc) {
		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
-			if (d->node != cpu)
-				break;
-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
-			route_irq(desc, i, newcpu);
-		}
	}
+
+	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c..8f6ed43861f 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
		return space_cccc_1100_010x(insn, asi);
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
		return space_cccc_110x(insn, asi);
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b47..fee7c36349e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/smp_plat.h>
#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
@@ -75,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
		unsigned long loc;
		Elf32_Sym *sym;
+		const char *symname;
		s32 offset;
#ifdef CONFIG_THUMB2_KERNEL
		u32 upper, lower, sign, j1, j2;
@@ -82,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		offset = ELF32_R_SYM(rel->r_info);
		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
-			printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
			       module->name, relindex, i);
			return -ENOEXEC;
		}
		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+		symname = strtab + sym->st_name;
		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
-			printk(KERN_ERR "%s: out of bounds relocation, "
-				"section %d reloc %d offset %d size %d\n",
-				module->name, relindex, i, rel->r_offset,
-				dstsec->sh_size);
+			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+			       module->name, relindex, i, symname,
+			       rel->r_offset, dstsec->sh_size);
			return -ENOEXEC;
		}
@@ -119,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
			if (offset & 3 ||
			    offset <= (s32)0xfe000000 ||
			    offset >= (s32)0x02000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
				return -ENOEXEC;
			}
@@ -195,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
			if (!(offset & 1) ||
			    offset <= (s32)0xff000000 ||
			    offset >= (s32)0x01000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
				return -ENOEXEC;
			}
@@ -268,12 +270,29 @@ struct mod_unwind_map {
	const Elf_Shdr *txt_sec;
};
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_pv_table(const void *, unsigned long);
+extern void fixup_smp(const void *, unsigned long);
+
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *mod)
{
+	const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
	struct mod_unwind_map maps[ARM_SEC_MAX];
	int i;
@@ -315,6 +334,14 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		maps[i].txt_sec->sh_addr,
		maps[i].txt_sec->sh_size);
#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	s = find_mod_section(hdr, sechdrs, ".pv_table");
+	if (s)
+		fixup_pv_table((void *)s->sh_addr, s->sh_size);
+#endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
	return 0;
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa2647a2f..d150ad1ccb5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
		return NULL;
	return buftail.fp - 1;
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index c058bfc8532..6fc2d228db5 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -30,7 +30,7 @@
 * enable the interrupt.
 */
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return NULL;
}
-#endif	/* CONFIG_CPU_V6 */
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e..2c79eec1926 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
		   irq, cpu);
	return err;
#else
-	return 0;
+	return -EINVAL;
#endif
}
static int init_cpu_pmu(void)
{
-	int i, err = 0;
+	int i, irqs, err = 0;
	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
		err = set_irq_affinity(platform_get_irq(pdev, i), i);
		if (err)
			break;
	}
-out:
	return err;
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61..2bf27f364d0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,8 +26,6 @@
#include <asm/system.h>
#include <asm/traps.h>
-#include "ptrace.h"
-
#define REG_PC	15
#define REG_PSR	16
/*
@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
	return ret;
}
-static inline int
-read_u32(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, res, sizeof(*res), 0);
-
-	return ret == sizeof(*res) ? 0 : -EIO;
-}
-
-static inline int
-read_instr(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	if (addr & 1) {
-		u16 val;
-		ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	} else {
-		u32 val;
-		ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	}
-	return ret;
-}
-
-/*
- * Get value of register `rn' (in the instruction)
- */
-static unsigned long
-ptrace_getrn(struct task_struct *child, unsigned long insn)
-{
-	unsigned int reg = (insn >> 16) & 15;
-	unsigned long val;
-
-	val = get_user_reg(child, reg);
-	if (reg == 15)
-		val += 8;
-
-	return val;
-}
-
-/*
- * Get value of operand 2 (in an ALU instruction)
- */
-static unsigned long
-ptrace_getaluop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	if (insn & 1 << 25) {
-		val = insn & 255;
-		shift = (insn >> 8) & 15;
-		type = 3;
-	} else {
-		val = get_user_reg (child, insn & 15);
-
-		if (insn & (1 << 4))
-			shift = (int)get_user_reg (child, (insn >> 8) & 15);
-		else
-			shift = (insn >> 7) & 31;
-
-		type = (insn >> 5) & 3;
-	}
-
-	switch (type) {
-	case 0:	val <<= shift;	break;
-	case 1:	val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
-		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-/*
- * Get value of operand 2 (in a LDR instruction)
- */
-static unsigned long
-ptrace_getldrop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	val = get_user_reg(child, insn & 15);
-	shift = (insn >> 7) & 31;
-	type = (insn >> 5) & 3;
-
-	switch (type) {
-	case 0: val <<= shift;	break;
-	case 1: val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
-		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-#define OP_MASK	0x01e00000
-#define OP_AND	0x00000000
-#define OP_EOR	0x00200000
-#define OP_SUB	0x00400000
-#define OP_RSB	0x00600000
-#define OP_ADD	0x00800000
-#define OP_ADC	0x00a00000
-#define OP_SBC	0x00c00000
-#define OP_RSC	0x00e00000
-#define OP_ORR	0x01800000
-#define OP_MOV	0x01a00000
-#define OP_BIC	0x01c00000
-#define OP_MVN	0x01e00000
-
-static unsigned long
-get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
-{
-	u32 alt = 0;
-
-	switch (insn & 0x0e000000) {
-	case 0x00000000:
-	case 0x02000000: {
-		/*
-		 * data processing
-		 */
-		long aluop1, aluop2, ccbit;
-
-		if ((insn & 0x0fffffd0) == 0x012fff10) {
-			/*
-			 * bx or blx
-			 */
-			alt = get_user_reg(child, insn & 15);
-			break;
-		}
-
-
-		if ((insn & 0xf000) != 0xf000)
-			break;
-
-		aluop1 = ptrace_getrn(child, insn);
-		aluop2 = ptrace_getaluop2(child, insn);
-		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
-
-		switch (insn & OP_MASK) {
-		case OP_AND: alt = aluop1 & aluop2;		break;
-		case OP_EOR: alt = aluop1 ^ aluop2;		break;
-		case OP_SUB: alt = aluop1 - aluop2;		break;
-		case OP_RSB: alt = aluop2 - aluop1;		break;
-		case OP_ADD: alt = aluop1 + aluop2;		break;
-		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
-		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
-		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
-		case OP_ORR: alt = aluop1 | aluop2;		break;
-		case OP_MOV: alt = aluop2;			break;
-		case OP_BIC: alt = aluop1 & ~aluop2;		break;
-		case OP_MVN: alt = ~aluop2;			break;
-		}
-		break;
-	}
-
-	case 0x04000000:
-	case 0x06000000:
-		/*
-		 * ldr
-		 */
-		if ((insn & 0x0010f000) == 0x0010f000) {
-			unsigned long base;
-
-			base = ptrace_getrn(child, insn);
-			if (insn & 1 << 24) {
-				long aluop2;
-
-				if (insn & 0x02000000)
-					aluop2 = ptrace_getldrop2(child, insn);
-				else
-					aluop2 = insn & 0xfff;
-
-				if (insn & 1 << 23)
-					base += aluop2;
-				else
-					base -= aluop2;
-			}
-			read_u32(child, base, &alt);
-		}
-		break;
-
-	case 0x08000000:
-		/*
-		 * ldm
-		 */
-		if ((insn & 0x00108000) == 0x00108000) {
-			unsigned long base;
-			unsigned int nr_regs;
-
-			if (insn & (1 << 23)) {
-				nr_regs = hweight16(insn & 65535) << 2;
-
-				if (!(insn & (1 << 24)))
-					nr_regs -= 4;
-			} else {
-				if (insn & (1 << 24))
-					nr_regs = -4;
-				else
-					nr_regs = 0;
-			}
-
-			base = ptrace_getrn(child, insn);
-
-			read_u32(child, base + nr_regs, &alt);
-			break;
-		}
-		break;
-
-	case 0x0a000000: {
-		/*
-		 * bl or b
-		 */
-		signed long displ;
-		/* It's a branch/branch link: instead of trying to
-		 * figure out whether the branch will be taken or not,
-		 * we'll put a breakpoint at both locations. This is
-		 * simpler, more reliable, and probably not a whole lot
-		 * slower than the alternative approach of emulating the
-		 * branch.
-		 */
-		displ = (insn & 0x00ffffff) << 8;
-		displ = (displ >> 6) + 8;
-		if (displ != 0 && displ != 4)
-			alt = pc + displ;
-	}
-		break;
-	}
-
-	return alt;
-}
-
-static int
-swap_insn(struct task_struct *task, unsigned long addr,
-	  void *old_insn, void *new_insn, int size)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, old_insn, size, 0);
-	if (ret == size)
-		ret = access_process_vm(task, addr, new_insn, size, 1);
-	return ret;
-}
-
-static void
-add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
-{
-	int nr = dbg->nsaved;
-
-	if (nr < 2) {
-		u32 new_insn = BREAKINST_ARM;
-		int res;
-
-		res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
-
-		if (res == 4) {
-			dbg->bp[nr].address = addr;
-			dbg->nsaved += 1;
-		}
-	} else
-		printk(KERN_ERR "ptrace: too many breakpoints\n");
-}
-
-/*
- * Clear one breakpoint in the user program. We copy what the hardware
- * does and use bit 0 of the address to indicate whether this is a Thumb
- * breakpoint or an ARM breakpoint.
- */
-static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
-{
-	unsigned long addr = bp->address;
-	union debug_insn old_insn;
-	int ret;
-
-	if (addr & 1) {
-		ret = swap_insn(task, addr & ~1, &old_insn.thumb,
-				&bp->insn.thumb, 2);
-
-		if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
-			printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
-				"0x%08lx (0x%04x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.thumb);
-	} else {
-		ret = swap_insn(task, addr & ~3, &old_insn.arm,
-				&bp->insn.arm, 4);
-
-		if (ret != 4 || old_insn.arm != BREAKINST_ARM)
-			printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
-				"0x%08lx (0x%08x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.arm);
-	}
-}
-
-void ptrace_set_bpt(struct task_struct *child)
-{
-	struct pt_regs *regs;
-	unsigned long pc;
-	u32 insn;
-	int res;
-
-	regs = task_pt_regs(child);
-	pc = instruction_pointer(regs);
-
-	if (thumb_mode(regs)) {
-		printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
-		return;
-	}
-
-	res = read_instr(child, pc, &insn);
-	if (!res) {
-		struct debug_info *dbg = &child->thread.debug;
-		unsigned long alt;
-
-		dbg->nsaved = 0;
-
-		alt = get_branch_address(child, pc, insn);
-		if (alt)
-			add_breakpoint(child, dbg, alt);
-
-		/*
-		 * Note that we ignore the result of setting the above
-		 * breakpoint since it may fail. When it does, this is
-		 * not so much an error, but a forewarning that we may
-		 * be receiving a prefetch abort shortly.
-		 *
-		 * If we don't set this breakpoint here, then we can
-		 * lose control of the thread during single stepping.
-		 */
-		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
-			add_breakpoint(child, dbg, pc + 4);
-	}
-}
-
-/*
- * Ensure no single-step breakpoint is pending. Returns non-zero
- * value if child was being single-stepped.
- */
-void ptrace_cancel_bpt(struct task_struct *child)
-{
-	int i, nsaved = child->thread.debug.nsaved;
-
-	child->thread.debug.nsaved = 0;
-
-	if (nsaved > 2) {
-		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
-		nsaved = 2;
-	}
-
-	for (i = 0; i < nsaved; i++)
-		clear_breakpoint(child, &child->thread.debug.bp[i]);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
-	task->ptrace &= ~PT_SINGLESTEP;
-	ptrace_cancel_bpt(task);
-}
-
-void user_enable_single_step(struct task_struct *task)
-{
-	task->ptrace |= PT_SINGLESTEP;
-}
-
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
-	user_disable_single_step(child);
+	/* Nothing to do. */
}
/*
@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;
-	ptrace_cancel_bpt(tsk);
-
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
@@ -996,10 +615,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;
-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
	}
put:
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
deleted file mode 100644
index 3926605b82e..00000000000
--- a/arch/arm/kernel/ptrace.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * linux/arch/arm/kernel/ptrace.h
- *
- * Copyright (C) 2000-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/ptrace.h>
-
-extern void ptrace_cancel_bpt(struct task_struct *);
-extern void ptrace_set_bpt(struct task_struct *);
-extern void ptrace_break(struct task_struct *, struct pt_regs *);
-
-/*
- * Send SIGTRAP if we're single-stepping
- */
-static inline void single_step_trap(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP) {
-		ptrace_cancel_bpt(task);
-		send_sig(SIGTRAP, task, 1);
-	}
-}
-
-static inline void single_step_clear(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_cancel_bpt(task);
-}
-
-static inline void single_step_set(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_set_bpt(task);
-}
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index df246da4cec..0b13a72f855 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -9,6 +9,7 @@
 * the Free Software Foundation.
 */
#include <linux/module.h>
+#include <linux/ftrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d..d1da9217427 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
@@ -308,7 +308,22 @@ static void __init cacheid_init(void)
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
-extern struct machine_desc *lookup_machine_type(unsigned int);
+
+static void __init early_print(const char *str, ...)
+{
+	extern void printascii(const char *);
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, str);
+	vsnprintf(buf, sizeof(buf), str, ap);
+	va_end(ap);
+
+#ifdef CONFIG_DEBUG_LL
+	printascii(buf);
+#endif
+	printk("%s", buf);
+}
static void __init feat_v6_fixup(void)
{
@@ -426,21 +441,29 @@ void cpu_init(void)
static struct machine_desc * __init setup_machine(unsigned int nr)
{
-	struct machine_desc *list;
+	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+	struct machine_desc *p;
	/*
	 * locate machine in the list of supported machines.
	 */
-	list = lookup_machine_type(nr);
-	if (!list) {
-		printk("Machine configuration botched (nr %d), unable "
-		       "to continue.\n", nr);
-		while (1);
-	}
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		if (nr == p->nr) {
+			printk("Machine: %s\n", p->name);
+			return p;
+		}
-	printk("Machine: %s\n", list->name);
+	early_print("\n"
+		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
+		"Available machine support:\n\nID (hex)\tNAME\n", nr);
-	return list;
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		early_print("%08x\t%s\n", p->nr, p->name);
+
+	early_print("\nPlease check your kernel config and/or bootloader.\n");
+
+	while (true)
+		/* can't use cpu_relax() here as it may require MMU setup */;
}
static int __init arm_add_memory(unsigned long start, unsigned long size)
@@ -703,7 +726,7 @@ static struct init_tags {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
-	{ MEM_SIZE, PHYS_OFFSET },
+	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
@@ -802,6 +825,8 @@ void __init setup_arch(char **cmdline_p)
	struct machine_desc *mdesc;
	char *from = default_command_line;
+	init_tags.mem.start = PHYS_OFFSET;
+
	unwind_init();
	setup_processor();
@@ -814,8 +839,25 @@ void __init setup_arch(char **cmdline_p)
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
-	else if (mdesc->boot_params)
-		tags = phys_to_virt(mdesc->boot_params);
+	else if (mdesc->boot_params) {
+#ifdef CONFIG_MMU
+		/*
+		 * We still are executing with a minimal MMU mapping created
+		 * with the presumption that the machine default for this
+		 * is located in the first MB of RAM.  Anything else will
+		 * fault and silently hang the kernel at this point.
+		 */
+		if (mdesc->boot_params < PHYS_OFFSET ||
+		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
+			printk(KERN_WARNING
+			       "Default boot params at physical 0x%08lx out of reach\n",
+			       mdesc->boot_params);
+		} else
+#endif
+		{
+			tags = phys_to_virt(mdesc->boot_params);
+		}
+	}
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bc..cb839831764 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -20,7 +20,6 @@
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "ptrace.h"
#include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
	if (restore_sigframe(regs, frame))
		goto badframe;
-	single_step_trap(current);
-
	return regs->ARM_r0;
badframe:
@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;
-	single_step_trap(current);
-
	return regs->ARM_r0;
badframe:
@@ -474,7 +469,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
@@ -704,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
	if (try_to_freeze())
		goto no_signal;
-	single_step_clear(current);
-
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		sigset_t *oldset;
@@ -724,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}
-		single_step_set(current);
		return;
	}
@@ -770,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}
-	single_step_set(current);
}
asmlinkage void
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
new file mode 100644
index 00000000000..bfad698a02e
--- /dev/null
+++ b/arch/arm/kernel/sleep.S
@@ -0,0 +1,134 @@
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+	.text
+
+/*
+ * Save CPU state for a suspend
+ *  r1 = v:p offset
+ *  r3 = virtual return function
+ * Note: sp is decremented to allocate space for CPU state on stack
+ * r0-r3,r9,r10,lr corrupted
+ */
+ENTRY(cpu_suspend)
+	mov	r9, lr
+#ifdef MULTI_CPU
+	ldr	r10, =processor
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	add	ip, ip, r1		@ convert resume fn to phys
+	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	mov	lr, pc
+	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, =cpu_suspend_size
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	bl	cpu_do_suspend
+#endif
+
+	@ flush data cache
+#ifdef MULTI_CACHE
+	ldr	r10, =cpu_cache
+	mov	lr, r9
+	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+	mov	lr, r9
+	b	__cpuc_flush_kern_all
+#endif
+ENDPROC(cpu_suspend)
+	.ltorg
+
+/*
+ * r0 = control register value
+ * r1 = v:p offset (preserved by cpu_do_resume)
+ * r2 = phys page table base
+ * r3 = L1 section flags
+ */
+ENTRY(cpu_resume_mmu)
+	adr	r4, cpu_resume_turn_mmu_on
+	mov	r4, r4, lsr #20
+	orr	r3, r3, r4, lsl #20
+	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
+	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
+	sub	r2, r2, r1
+	ldr	r3, =cpu_resume_after_mmu
+	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
+	b	cpu_resume_turn_mmu_on
+ENDPROC(cpu_resume_mmu)
+	.ltorg
+	.align	5
+cpu_resume_turn_mmu_on:
+	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
+	mov	r1, r1
+	mov	r1, r1
+	mov	pc, r3			@ jump to virtual address
+ENDPROC(cpu_resume_turn_mmu_on)
+cpu_resume_after_mmu:
+	str	r5, [r2, r4, lsl #2]	@ restore old mapping
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
+	mov	pc, lr
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located into the .data section.
+ *       This is to allow sleep_save_sp to be accessed with a relative load
+ *       while we can't rely on any MMU translation.  We could have put
+ *       sleep_save_sp in the .text section as well, but some setups might
+ *       insist on it to be truly read-only.
+ */
+	.data
+	.align
+ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+	adr	r0, sleep_save_sp
+	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+	ALT_UP(mov r1, #0)
+	and	r1, r1, #15
+	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
+#else
+	ldr	r0, sleep_save_sp	@ stack phys addr
+#endif
+	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+#ifdef MULTI_CPU
+	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+#else
+	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	b	cpu_do_resume
+#endif
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+	.rept	CONFIG_NR_CPUS
+	.long	0			@ preserve stack phys ptr here
+	.endr
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 9ab4149bd98..a1e757c3439 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
	 */
	flush_cache_all();
}
+
+/*
+ * Set the executing CPUs power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 26685c2f7a4..f5cf660eefc 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -15,7 +15,7 @@
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
-#include <mach/memory.h>
+#include <asm/memory.h>
#include "tcm.h"
static struct gen_pool *tcm_pool;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 3d76bf23373..1ff46cabc7e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -107,9 +107,7 @@ void timer_tick(void)
{
	profile_tick(CPU_PROFILING);
	do_leds();
-	write_seqlock(&xtime_lock);
-	do_timer(1);
-	write_sequnlock(&xtime_lock);
+	xtime_update(1);
#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index ee57640ba2b..21ac43f1c2d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,6 +23,7 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -32,7 +33,6 @@
#include <asm/unwind.h>
#include <asm/tls.h>
-#include "ptrace.h"
#include "signal.h"
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
	return ret;
}
-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
/*
 * This function is protected against re-entrancy.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f203..b4348e62ef0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
#define ARM_CPU_KEEP(x)
#endif
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -43,6 +49,7 @@ SECTIONS
		_sinittext = .;
			HEAD_TEXT
			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
		_einittext = .;
		ARM_CPU_DISCARD(PROC_INFO)
		__arch_info_begin = .;
@@ -57,6 +64,10 @@ SECTIONS
		__smpalt_end = .;
#endif
+		__pv_table_begin = .;
+			*(.pv_table)
+		__pv_table_end = .;
+
		INIT_SETUP(16)
		INIT_CALLS
@@ -67,10 +78,11 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
		__init_begin = _stext;
		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
#endif
	}
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
#ifndef CONFIG_XIP_KERNEL
	. = ALIGN(PAGE_SIZE);
@@ -162,6 +174,7 @@ SECTIONS
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif
@@ -247,6 +260,8 @@ SECTIONS
	}
#endif
+	NOTES
+
	BSS_SECTION(0, 0, 0)
	_end = .;