38 files changed, 461 insertions, 791 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 38bf684448e..1fd3f280b58 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
 	depends on GENERIC_CLOCKEVENTS
 	default y if SMP
 
+config KTIME_SCALAR
+	bool
+	default y
+
 config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR
@@ -178,11 +182,6 @@ config FIQ
 config ARCH_MTD_XIP
 	bool
 
-config ARM_L1_CACHE_SHIFT_6
-	bool
-	help
-	  Setting ARM L1 cache line size to 64 Bytes.
-
 config VECTORS_BASE
 	hex
 	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -1152,7 +1151,7 @@ config ARM_ERRATA_742231
 
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
-	depends on CACHE_L2X0 && ARCH_OMAP4
+	depends on CACHE_L2X0
 	help
 	   The PL310 L2 cache controller implements three types of Clean &
 	   Invalidate maintenance operations: by Physical Address
@@ -1161,8 +1160,7 @@ config PL310_ERRATA_588369
 	   clean operation followed immediately by an invalidate operation,
 	   both performing to the same memory location. This functionality
 	   is not correctly implemented in PL310 as clean lines are not
-	   invalidated as a result of these operations. Note that this errata
-	   uses Texas Instrument's secure monitor api.
+	   invalidated as a result of these operations.
 
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@@ -1176,6 +1174,17 @@ config ARM_ERRATA_720789
 	  tables. The workaround changes the TLB flushing routines to
 	  invalidate entries regardless of the ASID.
 
+config PL310_ERRATA_727915
+	bool "Background Clean & Invalidate by Way operation can cause data corruption"
+	depends on CACHE_L2X0
+	help
+	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+	  operation (offset 0x7FC). This operation runs in background so that
+	  PL310 can handle normal accesses while it is in progress. Under very
+	  rare circumstances, due to this erratum, write data can be lost when
+	  PL310 treats a cacheable write transaction during a Clean &
+	  Invalidate by Way operation.
+
 config ARM_ERRATA_743622
 	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
 	depends on CPU_V7
@@ -1430,6 +1439,37 @@ config THUMB2_KERNEL
 
 	  If unsure, say N.
 
+config THUMB2_AVOID_R_ARM_THM_JUMP11
+	bool "Work around buggy Thumb-2 short branch relocations in gas"
+	depends on THUMB2_KERNEL && MODULES
+	default y
+	help
+	  Various binutils versions can resolve Thumb-2 branches to
+	  locally-defined, preemptible global symbols as short-range "b.n"
+	  branch instructions.
+
+	  This is a problem, because there's no guarantee the final
+	  destination of the symbol, or any candidate locations for a
+	  trampoline, are within range of the branch. For this reason, the
+	  kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
+	  relocation in modules at all, and it makes little sense to add
+	  support.
+
+	  The symptom is that the kernel fails with an "unsupported
+	  relocation" error when loading some modules.
+
+	  Until fixed tools are available, passing
+	  -fno-optimize-sibling-calls to gcc should prevent gcc generating
+	  code which hits this problem, at the cost of a bit of extra runtime
+	  stack usage in some cases.
+
+	  The problem is described in more detail at:
+	      https://bugs.launchpad.net/binutils-linaro/+bug/725126
+
+	  Only Thumb-2 kernels are affected.
+
+	  Unless you are sure your tools don't have this problem, say Y.
+
 config ARM_ASM_UNIFIED
 	bool
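As context for THUMB2_AVOID_R_ARM_THM_JUMP11: a minimal sketch, with hypothetical function names, of the code shape that triggers the bad relocation. gcc turns the tail call below into a plain branch, which affected gas versions may encode as a short-range "b.n" carrying an R_ARM_THM_JUMP11 relocation that the module loader refuses to fix up; -fno-optimize-sibling-calls makes gcc emit a normal call and return instead.

	/* hypothetical module code; only the shape matters */
	extern int helper(int x);	/* global, preemptible symbol */

	int wrapper(int x)
	{
		/* a sibling call: compiled as "b helper" rather than
		 * "bl helper" + "bx lr" when the optimization is on */
		return helper(x);
	}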
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index cd56c9129a1..55eca9ed760 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -105,6 +105,10 @@ AFLAGS_AUTOIT	:=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
 AFLAGS_NOWARN	:=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
 CFLAGS_THUMB2	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
 AFLAGS_THUMB2	:=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+# Work around buggy relocation from gas if requested:
+ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
+CFLAGS_MODULE	+=-fno-optimize-sibling-calls
+endif
 endif
 
 # Need -Uarm for gcc < 3.x
@@ -281,7 +285,7 @@ bzImage: zImage
 zImage Image xipImage bootpImage uImage: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall install: vmlinux
+zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
@@ -302,6 +306,7 @@ define archhelp
   echo  '                  (supply initrd image via make variable INITRD=<path>)'
   echo  '  install       - Install uncompressed kernel'
   echo  '  zinstall      - Install compressed kernel'
+  echo  '  uinstall      - Install U-Boot wrapped compressed kernel'
   echo  '                  Install using (your) ~/bin/$(INSTALLKERNEL) or'
   echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
   echo  '                  install to $$(INSTALL_PATH) and run lilo'
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 4d26f2c52a7..9128fddf110 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
+uinstall: $(obj)/uImage
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(obj)/uImage System.map "$(INSTALL_PATH)"
+
 zi:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5f3a1614dc6..f9f77c65dff 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -98,9 +98,11 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Provide size of uncompressed kernel to the decompressor via a linker symbol.
+LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
-LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
 endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
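The _image_size value injected with --defsym above is an absolute linker symbol: it has an "address" equal to the byte size of Image but no storage behind it. head.S reads it through the LC0 literal pool; from C, the idiomatic access would look like this sketch (assumes only the --defsym mechanism shown above):

	/* take the symbol's address and cast it; never dereference it */
	extern char _image_size[];	/* defined on the linker command line */

	static unsigned long image_size(void)
	{
		return (unsigned long)_image_size;
	}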
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7193884ed8b..39859216af0 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -128,14 +128,14 @@ wait:		mrc	p14, 0, pc, c0, c1, 0
 		.arm				@ Always enter in ARM state
 start:
 		.type	start,#function
-	THUMB(	adr	r12, BSYM(1f)	)
-	THUMB(	bx	r12		)
-	THUMB(	.rept	6		)
-	ARM(	.rept	8		)
+		.rept	7
 		mov	r0, r0
 		.endr
+	ARM(	mov	r0, r0		)
+	ARM(	b	1f		)
+	THUMB(	adr	r12, BSYM(1f)	)
+	THUMB(	bx	r12		)
 
-		b	1f
 		.word	0x016f2818		@ Magic numbers to help the loader
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
@@ -174,9 +174,7 @@ not_angel:
 		 */
 
 		.text
-		adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r11, ip}
-		ldr	sp, [r0, #28]
+
 #ifdef CONFIG_AUTO_ZRELADDR
 		@ determine final kernel image address
 		mov	r4, pc
@@ -185,35 +183,108 @@ not_angel:
 #else
 		ldr	r4, =zreladdr
 #endif
-		subs	r0, r0, r1		@ calculate the delta offset
-						@ if delta is zero, we are
-		beq	not_relocated		@ running at the address we
-						@ were linked at.
+		bl	cache_on
+
+restart:	adr	r0, LC0
+		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
+		ldr	sp, [r0, #32]
+
+		/*
+		 * We might be running at a different address.  We need
+		 * to fix up various pointers.
+		 */
+		sub	r0, r0, r1		@ calculate the delta offset
+		add	r5, r5, r0		@ _start
+		add	r6, r6, r0		@ _edata
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* malloc space is above the relocated stack (64k max) */
+		add	sp, sp, r0
+		add	r10, sp, #0x10000
+#else
 		/*
-		 * We're running at a different address.  We need to fix
-		 * up various pointers:
-		 *   r5 - zImage base address (_start)
-		 *   r6 - size of decompressed image
-		 *   r11 - GOT start
-		 *   ip - GOT end
+		 * With ZBOOT_ROM the bss/stack is non relocatable,
+		 * but someone could still run this code from RAM,
+		 * in which case our reference is _edata.
 		 */
-		add	r5, r5, r0
+		mov	r10, r6
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ *   r4  = final kernel address
+ *   r5  = start of this image
+ *   r9  = size of decompressed image
+ *   r10 = end of this image, including bss/stack/malloc space if non XIP
+ * We basically want:
+ *   r4 >= r10 -> OK
+ *   r4 + image length <= r5 -> OK
+ */
+		cmp	r4, r10
+		bhs	wont_overwrite
+		add	r10, r4, r9
+		cmp	r10, r5
+		bls	wont_overwrite
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ *   r5 = start of this image
+ *   r6 = _edata
+ *   r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+		/* Round up to next 256-byte boundary. */
+		add	r10, r10, #256
+		bic	r10, r10, #255
+
+		sub	r9, r6, r5		@ size to copy
+		add	r9, r9, #31		@ rounded up to a multiple
+		bic	r9, r9, #31		@ ... of 32 bytes
+		add	r6, r9, r5
+		add	r9, r9, r10
+
+1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
+		cmp	r6, r5
+		stmdb	r9!, {r0 - r3, r10 - r12, lr}
+		bhi	1b
+
+		/* Preserve offset to relocated code. */
+		sub	r6, r9, r6
+
+		bl	cache_clean_flush
+
+		adr	r0, BSYM(restart)
+		add	r0, r0, r6
+		mov	pc, r0
+
+wont_overwrite:
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ *   r0  = delta
+ *   r2  = BSS start
+ *   r3  = BSS end
+ *   r4  = kernel execution address
+ *   r7  = architecture ID
+ *   r8  = atags pointer
+ *   r11 = GOT start
+ *   r12 = GOT end
+ *   sp  = stack pointer
+ */
+		teq	r0, #0
+		beq	not_relocated
 		add	r11, r11, r0
-		add	ip, ip, r0
+		add	r12, r12, r0
 
 #ifndef CONFIG_ZBOOT_ROM
 		/*
 		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
 		 * we need to fix up pointers into the BSS region.
-		 * r2 - BSS start
-		 * r3 - BSS end
-		 * sp - stack pointer
+		 * Note that the stack pointer has already been fixed up.
 		 */
 		add	r2, r2, r0
 		add	r3, r3, r0
-		add	sp, sp, r0
 
 		/*
 		 * Relocate all entries in the GOT table.
@@ -221,7 +292,7 @@ not_angel:
 1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
 		add	r1, r1, r0		@ table. This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #else
 
@@ -234,7 +305,7 @@ not_angel:
 		cmphs	r3, r1			@ _end < entry
 		addlo	r1, r1, r0		@ table. This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #endif
 
@@ -246,76 +317,24 @@ not_relocated:	mov	r0, #0
 		cmp	r2, r3
 		blo	1b
 
-		/*
-		 * The C runtime environment should now be setup
-		 * sufficiently. Turn the cache on, set up some
-		 * pointers, and start decompressing.
-		 */
-		bl	cache_on
-
-		mov	r1, sp			@ malloc space above stack
-		add	r2, sp, #0x10000	@ 64k max
-
 		/*
-		 * Check to see if we will overwrite ourselves.
-		 *   r4 = final kernel address
-		 *   r5 = start of this image
-		 *   r6 = size of decompressed image
-		 *   r2 = end of malloc space (and therefore this image)
-		 * We basically want:
-		 *   r4 >= r2 -> OK
-		 *   r4 + image length <= r5 -> OK
+		 * The C runtime environment should now be setup sufficiently.
+		 * Set up some pointers, and start decompressing.
+		 *   r4 = kernel execution address
+		 *   r7 = architecture ID
+		 *   r8 = atags pointer
 		 */
-		cmp	r4, r2
-		bhs	wont_overwrite
-		add	r0, r4, r6
-		cmp	r0, r5
-		bls	wont_overwrite
-
-		mov	r5, r2			@ decompress after malloc space
-		mov	r0, r5
+		mov	r0, r4
+		mov	r1, sp			@ malloc space above stack
+		add	r2, sp, #0x10000	@ 64k max
 		mov	r3, r7
 		bl	decompress_kernel
-
-		add	r0, r0, #127 + 128	@ alignment + stack
-		bic	r0, r0, #127		@ align the kernel length
-/*
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		add	r1, r5, r0		@ end of decompressed kernel
-		adr	r2, reloc_start
-		ldr	r3, LC1
-		add	r3, r2, r3
-1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
-		stmia	r1!, {r9 - r12, r14}
-		ldmia	r2!, {r9 - r12, r14}
-		stmia	r1!, {r9 - r12, r14}
-		cmp	r2, r3
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-
 		bl	cache_clean_flush
- ARM(		add	pc, r5, r0	)	@ call relocation code
-THUMB(		add	r12, r5, r0	)
-THUMB(		mov	pc, r12		)	@ call relocation code
-
-/*
- * We're not in danger of overwriting ourselves.  Do this the simple way.
- *
- * r4     = kernel execution address
- * r7     = architecture ID
- */
-wont_overwrite:	mov	r0, r4
-		mov	r3, r7
-		bl	decompress_kernel
-		b	call_kernel
+		bl	cache_off
+		mov	r0, #0			@ must be zero
+		mov	r1, r7			@ restore architecture number
+		mov	r2, r8			@ restore atags pointer
+		mov	pc, r4			@ call kernel
 
 		.align	2
 		.type	LC0, #object
@@ -323,11 +342,11 @@ LC0:		.word	LC0			@ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
 		.word	_start			@ r5
-		.word	_image_size		@ r6
+		.word	_edata			@ r6
+		.word	_image_size		@ r9
 		.word	_got_start		@ r11
 		.word	_got_end		@ ip
 		.word	user_stack_end		@ sp
-LC1:		.word	reloc_end - reloc_start
 		.size	LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
@@ -353,7 +372,7 @@ params:		ldr	r0, =0x10000100		@ params_phys for RPC
  * On exit,
  *  r0, r1, r2, r3, r9, r10, r12 corrupted
  * This routine must preserve:
- *  r4, r5, r6, r7, r8
+ *  r4, r7, r8
  */
 		.align	5
 cache_on:	mov	r3, #8			@ cache_on function
@@ -551,43 +570,6 @@ __common_mmu_cache_on:
 #endif
 
 /*
- * All code following this line is relocatable.  It is relocated by
- * the above code to the end of the decompressed kernel image and
- * executed there.  During this time, we have no stacks.
- *
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		.align	5
-reloc_start:	add	r9, r5, r0
-		sub	r9, r9, #128		@ do not copy the stack
-		debug_reloc_start
-		mov	r1, r4
-1:
-		.rept	4
-		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
-		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
-		.endr
-
-		cmp	r5, r9
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-		debug_reloc_end
-
-call_kernel:	bl	cache_clean_flush
-		bl	cache_off
-		mov	r0, #0			@ must be zero
-		mov	r1, r7			@ restore architecture number
-		mov	r2, r8			@ restore atags pointer
-		mov	pc, r4			@ call kernel
-
-/*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
  * entry and jumping to an instruction at the specified offset
@@ -791,7 +773,7 @@ proc_types:
  * On exit,
  *  r0, r1, r2, r3, r9, r12 corrupted
  * This routine must preserve:
- *  r4, r6, r7
+ *  r4, r7, r8
  */
 		.align	5
 cache_off:	mov	r3, #12			@ cache_off function
@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
  * On exit,
  *  r1, r2, r3, r9, r10, r11, r12 corrupted
  * This routine must preserve:
- *  r0, r4, r5, r6, r7
+ *  r4, r6, r7, r8
  */
 		.align	5
 cache_clean_flush:
@@ -1088,7 +1070,6 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
-reloc_end:
 
 		.align
 		.section ".stack", "aw", %nobits
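The new relocation loop above (the ldmdb/stmdb pair before the branch back to restart) copies the zImage past the end of the decompressed kernel starting from the highest address, because the destination range can overlap the source. A C rendering of the same idea, as a sketch only (the real loop moves 32 bytes per iteration and flushes the cache afterwards):

	/* overlap-safe copy where dst > src, highest word first */
	static void copy_backward(unsigned long *dst_end,
				  unsigned long *src_end,
				  unsigned long *src_start)
	{
		while (src_end > src_start)
			*--dst_end = *--src_end;
	}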
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 366a924019a..5309909d728 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -43,9 +43,6 @@ SECTIONS
 
   _etext = .;
 
-  /* Assume size of decompressed image is 4x the compressed image */
-  _image_size = (_etext - _text) * 4;
-
   _got_start = .;
   .got			: { *(.got) }
   _got_end = .;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 22437721115..cb6b041c39d 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -44,6 +44,19 @@ struct gic_chip_data {
 	void __iomem *cpu_base;
 };
 
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_ack	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
 #endif
@@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d)
 static void gic_ack_irq(struct irq_data *d)
 {
 	spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_ack)
+		gic_arch_extn.irq_ack(d);
 	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
@@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d)
 
 	spin_lock(&irq_controller_lock);
 	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
 	spin_unlock(&irq_controller_lock);
 }
 
@@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d)
 	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
 	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
@@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 	spin_lock(&irq_controller_lock);
 
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
 	val = readl(base + GIC_DIST_CONFIG + confoff);
 	if (type == IRQ_TYPE_LEVEL_HIGH)
 		val &= ~confmask;
@@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 	return 0;
 }
 
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	return -ENXIO;
+}
+
 #ifdef CONFIG_SMP
-static int
-gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
 {
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
 	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
-	u32 val;
-	struct irq_desc *desc;
+	u32 val, mask, bit;
 
-	spin_lock(&irq_controller_lock);
-	desc = irq_to_desc(d->irq);
-	if (desc == NULL) {
-		spin_unlock(&irq_controller_lock);
+	if (cpu >= 8)
 		return -EINVAL;
-	}
+
+	mask = 0xff << shift;
+	bit = 1 << (cpu + shift);
+
+	spin_lock(&irq_controller_lock);
 	d->node = cpu;
-	val = readl(reg) & ~(0xff << shift);
-	val |= 1 << (cpu + shift);
-	writel(val, reg);
+	val = readl(reg) & ~mask;
+	writel(val | bit, reg);
 	spin_unlock(&irq_controller_lock);
 
 	return 0;
 }
 #endif
 
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = get_irq_data(irq);
@@ -202,9 +246,11 @@ static struct irq_chip gic_chip = {
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
 	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
-	.irq_set_affinity	= gic_set_cpu,
+	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_set_wake		= gic_set_wake,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
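gic_arch_extn above lets a platform graft its own behaviour onto the GIC's irq_chip callbacks without patching the GIC driver. A sketch of the intended use, with hypothetical my_* names and base addresses (only gic_arch_extn and the gic_init() prototype come from this patch):

	/* hypothetical machine code: install the hooks before the GIC is used */
	extern void __iomem *my_dist_base, *my_cpu_base;	/* platform-specific */

	static int my_pm_irq_set_wake(struct irq_data *d, unsigned int on)
	{
		/* ... program the platform wake-up controller for d->irq ... */
		return 0;
	}

	void __init my_machine_init_irq(void)
	{
		gic_arch_extn.irq_set_wake = my_pm_irq_set_wake;
		gic_init(0, 29, my_dist_base, my_cpu_base);	/* 29 = first irq, hypothetical */
	}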
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04acaa31..92f10cb5c70 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	dump->u_ssize = 0;
 
-	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
-	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
-	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
-	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
-	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
 
 	if (dump->start_stack < 0x04000000)
 		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96cc002..ed5bc9e05a4 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -23,6 +23,8 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+extern unsigned int processor_id;
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
 	({								\
@@ -43,7 +45,6 @@
 		__val;							\
 	})
 #else
-extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
 #endif
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 84557d32100..0691f9dcc50 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -34,6 +34,7 @@
 #ifndef __ASSEMBLY__
 
 extern void __iomem *gic_cpu_base_addr;
+extern struct irq_chip gic_arch_extn;
 
 void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7080e2c8fa6..a4edd19dd3d 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -19,11 +19,36 @@
 
 extern pte_t *pkmap_page_table;
 
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context.  With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so not to pay the price of establishing a second mapping if an existing
+ * one can be used.  However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
 #define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
 
-extern void *kmap_high(struct page *page);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
-extern void kunmap_high(struct page *page);
+#else
+static inline void *kmap_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif
 
 /*
  * The following functions are already defined by <linux/highmem.h>
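With the stub above, callers can use one pattern on every configuration: kmap_high_get() either pins an existing kernel mapping of a highmem page or returns NULL. A sketch of that calling pattern (the function name is made up; cache maintenance code is the typical user):

	static void flush_page_if_mapped(struct page *page)
	{
		void *vaddr = kmap_high_get(page);	/* pins the kmap, or NULL */

		if (vaddr) {
			/* ... cache maintenance on vaddr ... */
			kunmap_high(page);		/* drop the pin */
		}
		/* else: no kernel mapping exists, so nothing to flush here */
	}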
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 22ac140edd9..febe495d0c6 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -34,4 +34,35 @@ do {					\
 	raw_spin_unlock(&desc->lock);	\
 } while(0)
 
+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index fc190092527..348d513afa9 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -31,6 +31,7 @@ struct outer_cache_fns {
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
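The chained_irq_enter()/chained_irq_exit() helpers above let a chained demultiplex handler cooperate with either a fasteoi or a mask/ack primary chip. A sketch of the pattern, with hypothetical demux details (irq_desc_get_chip() is assumed from the irq core of this era; older trees spell it get_irq_desc_chip()):

	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned int cascade_irq;

		chained_irq_enter(chip, desc);

		cascade_irq = my_read_pending_irq();	/* hypothetical demux read */
		if (cascade_irq)
			generic_handle_irq(cascade_irq);

		chained_irq_exit(chip, desc);
	}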