From ebf30dc91cc8592cd72b004219cfc276b3ad2854 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Tue, 31 May 2011 16:10:00 -0700
Subject: msm: timer: Fix SMP build error

Fix build breakage on SMP=y builds due to 0f7b332 (ARM: consolidate
SMP cross call implementation, 2011-04-03)

arch/arm/mach-msm/timer.c: In function 'local_timer_setup':
arch/arm/mach-msm/timer.c:295: error: implicit declaration of function 'gic_enable_ppi'

Signed-off-by: Stephen Boyd
Signed-off-by: David Brown
---
 arch/arm/mach-msm/timer.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e949d1..9bfdd5ad244 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
 #include
 #include
 
+#include
+
 #include
 #include
 
--
cgit v1.2.3-18-g5258

From 74facffeca3795ffb5cf8898f5859fbb822e4c5d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 2 Jun 2011 11:16:22 +0100
Subject: ARM: Allow SoCs to enable scatterlist chaining

Allow SoCs to enable the scatterlist chaining support, which allows
scatterlist tables to be broken up into smaller allocations.

As support for this feature depends on the implementation details of
the users of the scatterlists, we can't enable this globally without
auditing all the users, which is a very big task.  Instead, let SoCs
progressively switch over to using this.

SoC drivers using scatterlists and SoC DMA implementations need
auditing before this option can be enabled for the SoC.

Signed-off-by: Russell King
---
 arch/arm/Kconfig                   | 3 +++
 arch/arm/include/asm/scatterlist.h | 4 ++++
 2 files changed, 7 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22a..cc0dcbf1f6b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -37,6 +37,9 @@ config ARM
 	  Europe.  There is an ARM Linux project with a web page at
 	  .
 
+config ARM_HAS_SG_CHAIN
+	bool
+
 config HAVE_PWM
 	bool
 
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index 2f87870d934..cefdb8f898a 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -1,6 +1,10 @@
 #ifndef _ASMARM_SCATTERLIST_H
 #define _ASMARM_SCATTERLIST_H
 
+#ifdef CONFIG_ARM_HAS_SG_CHAIN
+#define ARCH_HAS_SG_CHAIN
+#endif
+
 #include
 #include
 #include
--
cgit v1.2.3-18-g5258

From c8fb13d04ad0d08772f637bee873e620fcc064c2 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 27 May 2011 13:56:12 -0700
Subject: OMAP: PM debug: fix section mismatch warnings

WARNING: arch/arm/mach-omap2/built-in.o(.text+0x423c): Section mismatch
in reference from the function pm_dbg_regset_init() to the function
.init.text:pm_dbg_init()
The function pm_dbg_regset_init() references
the function __init pm_dbg_init().
This is often because pm_dbg_regset_init lacks a __init
annotation or the annotation of pm_dbg_init is wrong.
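To make the warning above concrete: code that stays resident at runtime must
not reference a function whose definition lives in .init.text, because the
kernel discards that section after boot. The patch resolves it by dropping
__init from pm_dbg_init(). Below is a stand-alone, plain-C sketch of the
fixed pattern; it is illustrative only (the message string and main() are
made up, and __init semantics only exist in a kernel build):

#include <stdio.h>

static int pm_dbg_init(void);          /* was: static int __init pm_dbg_init(void); */

static int pm_dbg_regset_init(void)    /* ordinary .text code, kept after boot */
{
	/*
	 * Calling an __init-annotated function from here is what modpost
	 * flags: .init.text is freed once boot completes, so the call could
	 * land in freed memory.  With __init removed from pm_dbg_init(),
	 * both functions live in .text and the reference is safe.
	 */
	return pm_dbg_init();
}

static int pm_dbg_init(void)
{
	puts("debugfs entries created");
	return 0;
}

int main(void)
{
	return pm_dbg_regset_init();
}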
Signed-off-by: Russell King
Signed-off-by: Kevin Hilman
---
 arch/arm/mach-omap2/pm-debug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a5a83b358dd..e01da45c053 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
-static int __init pm_dbg_init(void);
+static int pm_dbg_init(void);
 
 enum {
 	DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
 
-static int __init pm_dbg_init(void)
+static int pm_dbg_init(void)
 {
 	int i;
 	struct dentry *d;
--
cgit v1.2.3-18-g5258

From 345f79b3de7f6d651e4dba794af7c7303bdfd649 Mon Sep 17 00:00:00 2001
From: Kevin Hilman
Date: Tue, 31 May 2011 16:08:09 -0700
Subject: OMAP: PM: omap_device: fix device power domain callbacks

After commit 4d27e9dcff00a6425d779b065ec8892e4f391661 (PM: Make power
domain callbacks take precedence over subsystem ones), the power domain
callbacks need to call the driver callbacks instead of relying on the
default subsystem (in this case, platform_bus) to handle the driver
callbacks.

Validated on 3430/n900, 3530/Overo.

Signed-off-by: Kevin Hilman
---
 arch/arm/plat-omap/omap_device.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index a37b8eb65b7..49fc0df0c21 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -84,6 +84,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
 static int _od_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
+	int ret;
+
+	ret = pm_generic_runtime_suspend(dev);
+
+	if (!ret)
+		omap_device_idle(pdev);
+
+	return ret;
+}
 
-	return omap_device_idle(pdev);
+static int _od_runtime_idle(struct device *dev)
+{
+	return pm_generic_runtime_idle(dev);
 }
 
 static int _od_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 
-	return omap_device_enable(pdev);
+	omap_device_enable(pdev);
+
+	return pm_generic_runtime_resume(dev);
 }
 
 static struct dev_power_domain omap_device_power_domain = {
 	.ops = {
 		.runtime_suspend = _od_runtime_suspend,
+		.runtime_idle = _od_runtime_idle,
 		.runtime_resume = _od_runtime_resume,
 		USE_PLATFORM_PM_SLEEP_OPS
 	}
--
cgit v1.2.3-18-g5258

From 470f22975448a65a1084a6f0721fa5df15323f02 Mon Sep 17 00:00:00 2001
From: Boojin Kim
Date: Fri, 27 May 2011 19:04:03 -0700
Subject: ARM: SAMSUNG: serial: Fix on handling of one clock source for UART

This patch fixes the comparison used when handling two or more clock
sources for a UART. For example, if only one clock source is defined
even though there are two clock sources for the UART, the serial driver
does not set up the proper clock, which is a problem. So this patch
changes the comparison condition to avoid useless clock setup and adds
a flag 'NO_NEED_CHECK_CLKSRC' which means selection of the source clock
is not required.

In addition, since the Exynos4210 has only one clock source for UART
this patch adds the flag into its common_init_uarts().
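For illustration, a minimal userspace sketch of how such a flag is meant to
be consumed. Only the flag name and its value (NO_NEED_CHECK_CLKSRC, 1) come
from the patch above; the struct, the helper and the check itself are
hypothetical stand-ins, since the driver-side code is not part of this diff:

#include <stdio.h>

#define NO_NEED_CHECK_CLKSRC 1      /* value from regs-serial.h above */

struct uart_cfg {                   /* hypothetical, not s3c2410_uartcfg */
	unsigned int flags;
	int nr_clocks;              /* candidate clock sources for this port */
};

static int select_clksrc(const struct uart_cfg *cfg)
{
	if (cfg->flags & NO_NEED_CHECK_CLKSRC)
		return 0;           /* single source (e.g. Exynos4210): nothing to compare */

	/* otherwise compare the candidates and pick one (omitted here) */
	return cfg->nr_clocks > 1 ? 1 : 0;
}

int main(void)
{
	struct uart_cfg exynos4_uart = { .flags = NO_NEED_CHECK_CLKSRC, .nr_clocks = 1 };

	printf("selected clock index: %d\n", select_clksrc(&exynos4_uart));
	return 0;
}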
Signed-off-by: Boojin Kim Signed-off-by: Kukjin Kim Cc: stable Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-exynos4/init.c | 1 + arch/arm/plat-samsung/include/plat/regs-serial.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/init.c b/arch/arm/mach-exynos4/init.c index cf91f50e43a..a8a83e3881a 100644 --- a/arch/arm/mach-exynos4/init.c +++ b/arch/arm/mach-exynos4/init.c @@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no) tcfg->clocks = exynos4_serial_clocks; tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); } + tcfg->flags |= NO_NEED_CHECK_CLKSRC; } s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h index c151c5f94a8..116edfe120b 100644 --- a/arch/arm/plat-samsung/include/plat/regs-serial.h +++ b/arch/arm/plat-samsung/include/plat/regs-serial.h @@ -224,6 +224,8 @@ #define S5PV210_UFSTAT_RXMASK (255<<0) #define S5PV210_UFSTAT_RXSHIFT (0) +#define NO_NEED_CHECK_CLKSRC 1 + #ifndef __ASSEMBLY__ /* struct s3c24xx_uart_clksrc -- cgit v1.2.3-18-g5258 From 9d5ae7cd6cb9ead43336fec1094184d1dc740fbd Mon Sep 17 00:00:00 2001 From: Grazvydas Ignotas Date: Fri, 3 Jun 2011 20:24:03 +0000 Subject: omap: pandora: fix NAND support Commit d5ce2b65 "omap3630: nand: fix device size to work in polled mode" changed values for .devsize in nand platform data, now we have to pass NAND_BUSWIDTH_16 instead of '1' to select 16bit NAND. Update pandora's platform data accordingly, also specify appropriate transfer type. Signed-off-by: Grazvydas Ignotas Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-omap3pandora.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 1d10736c6d3..a3d655c0a49 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -86,7 +86,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = { static struct omap_nand_platform_data pandora_nand_data = { .cs = 0, - .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ + .devsize = NAND_BUSWIDTH_16, + .xfer_type = NAND_OMAP_PREFETCH_DMA, .parts = omap3pandora_nand_partitions, .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), }; -- cgit v1.2.3-18-g5258 From e9e35c5a2b2c803b5e2f25906d8ffe110670ceb6 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 14 Jun 2011 05:53:18 -0700 Subject: OMAP1: PM: register notifiers with generic clock ops even when !PM_RUNTIME When runtime PM is disabled, device clocks need to be enabled on device add and disabled on device remove. This currently is not happening because in the !PM_RUNTIME case, no notifiers are registered for OMAP1 devices. Fix this by ensuring notifiers are registered, even in the !PM_RUNTIME case. 
Reported-by: Janusz Krzysztofik Tested-by: Janusz Krzysztofik Signed-off-by: Kevin Hilman Signed-off-by: Tony Lindgren --- arch/arm/mach-omap1/Makefile | 4 ++-- arch/arm/mach-omap1/pm_bus.c | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index af98117043d..5b114d1558c 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile @@ -4,14 +4,14 @@ # Common support obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o -obj-y += clock.o clock_data.o opp_data.o reset.o +obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o # Power Management -obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o +obj-$(CONFIG_PM) += pm.o sleep.o # DSP obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index fe31d933f0e..334fb8871bc 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = { USE_PLATFORM_PM_SLEEP_OPS }, }; +#define OMAP1_PWR_DOMAIN (&default_power_domain) +#else +#define OMAP1_PWR_DOMAIN NULL +#endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = &default_power_domain, + .pwr_domain = OMAP1_PWR_DOMAIN, .con_ids = { "ick", "fck", NULL, }, }; @@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void) return 0; } core_initcall(omap1_pm_runtime_init); -#endif /* CONFIG_PM_RUNTIME */ + -- cgit v1.2.3-18-g5258 From 158f1e95180d01ebfd7cd5c8de23050528303f26 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 14 Jun 2011 17:06:02 -0700 Subject: gpio: include linux/gpio.h where needed Some files use GPIOF_ macros but don't include the header file for them. These macros are being moved to , so add includes for where needed. Signed-off-by: Randy Dunlap Signed-off-by: Grant Likely --- arch/arm/mach-pxa/spitz_pm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 7fe74067d85..094279aefe9 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-18-g5258 From 1a7d4369b3fe1f8e5efe7f11a1c482055693852f Mon Sep 17 00:00:00 2001 From: Shreshtha Kumar Sahu Date: Mon, 13 Jun 2011 10:11:44 +0200 Subject: amba pl011: platform data for reg lockup and glitch v2 This patch provides platform data for following - uart reset function to assist uart register lockup workaround - init/exit function to fix glitch in the tx pin in tty_open when tty port0 is opened a glitch is seen in the tx line of uart0. This happens in pl011_startup() when tx fifo interrupt is provoked into asserting. Now uart0 pins are enabled (alt function) only when init is complete and turned back to gpio when closed. 
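A minimal userspace sketch of the callback-style platform data this patch
introduces. Only the three hook names (init, exit, reset) correspond to the
fields added to amba_pl011_data; the struct name, messages and call sites
are made up for illustration:

#include <stdio.h>

struct pl011_hooks {                    /* stand-in for the new amba_pl011_data fields */
	void (*init)(void);             /* called when the port is opened */
	void (*exit)(void);             /* called when the port is closed */
	void (*reset)(void);            /* called to recover from register lockup */
};

static void uart0_init(void)  { puts("switch pins to UART alt function"); }
static void uart0_exit(void)  { puts("switch pins back to GPIO"); }
static void uart0_reset(void) { puts("pulse the PRCC soft-reset bit for UART0"); }

static void driver_open(const struct pl011_hooks *h)
{
	if (h && h->init)
		h->init();              /* driver calls back into board code */
}

int main(void)
{
	struct pl011_hooks uart0 = {
		.init  = uart0_init,
		.exit  = uart0_exit,
		.reset = uart0_reset,
	};

	driver_open(&uart0);
	uart0.reset();
	uart0.exit();
	return 0;
}

Keeping the hooks in board platform data is what lets the generic PL011
driver stay free of Ux500-specific pin and reset details.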
Signed-off-by: Shreshtha Kumar Sahu Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-ux500/board-mop500-pins.c | 16 +++++++--- arch/arm/mach-ux500/board-mop500.c | 54 +++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c index fd4cf1ca5ef..70cdbd60596 100644 --- a/arch/arm/mach-ux500/board-mop500-pins.c +++ b/arch/arm/mach-ux500/board-mop500-pins.c @@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = { GPIO168_KP_O0, /* UART */ - GPIO0_U0_CTSn | PIN_INPUT_PULLUP, - GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, - GPIO2_U0_RXD | PIN_INPUT_PULLUP, - GPIO3_U0_TXD | PIN_OUTPUT_HIGH, + /* uart-0 pins gpio configuration should be + * kept intact to prevent glitch in tx line + * when tty dev is opened. Later these pins + * are configured to uart mop500_pins_uart0 + * + * It will be replaced with uart configuration + * once the issue is solved. + */ + GPIO0_GPIO | PIN_INPUT_PULLUP, + GPIO1_GPIO | PIN_OUTPUT_HIGH, + GPIO2_GPIO | PIN_INPUT_PULLUP, + GPIO3_GPIO | PIN_OUTPUT_HIGH, GPIO29_U2_RXD | PIN_INPUT_PULLUP, GPIO30_U2_TXD | PIN_OUTPUT_HIGH, diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index bb26f40493e..2a08c07dec6 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -27,18 +27,21 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include #include +#include "pins-db8500.h" #include "ste-dma40-db8500.h" #include "devices-db8500.h" #include "board-mop500.h" @@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = { }; #endif + +static pin_cfg_t mop500_pins_uart0[] = { + GPIO0_U0_CTSn | PIN_INPUT_PULLUP, + GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, + GPIO2_U0_RXD | PIN_INPUT_PULLUP, + GPIO3_U0_TXD | PIN_OUTPUT_HIGH, +}; + +#define PRCC_K_SOFTRST_SET 0x18 +#define PRCC_K_SOFTRST_CLEAR 0x1C +static void ux500_uart0_reset(void) +{ + void __iomem *prcc_rst_set, *prcc_rst_clr; + + prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + + PRCC_K_SOFTRST_SET); + prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + + PRCC_K_SOFTRST_CLEAR); + + /* Activate soft reset PRCC_K_SOFTRST_CLEAR */ + writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr); + udelay(1); + + /* Release soft reset PRCC_K_SOFTRST_SET */ + writel((readl(prcc_rst_set) | 0x1), prcc_rst_set); + udelay(1); +} + +static void ux500_uart0_init(void) +{ + int ret; + + ret = nmk_config_pins(mop500_pins_uart0, + ARRAY_SIZE(mop500_pins_uart0)); + if (ret < 0) + pr_err("pl011: uart pins_enable failed\n"); +} + +static void ux500_uart0_exit(void) +{ + int ret; + + ret = nmk_config_pins_sleep(mop500_pins_uart0, + ARRAY_SIZE(mop500_pins_uart0)); + if (ret < 0) + pr_err("pl011: uart pins_disable failed\n"); +} + static struct amba_pl011_data uart0_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart0_dma_cfg_rx, .dma_tx_param = &uart0_dma_cfg_tx, #endif + .init = ux500_uart0_init, + .exit = ux500_uart0_exit, + .reset = ux500_uart0_reset, }; static struct amba_pl011_data uart1_plat = { -- cgit v1.2.3-18-g5258 From 2bc58a6fd76f89052c7f151d78fb2d8b804aacfe Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 13 Jun 2011 06:46:44 +0100 Subject: ARM: 6959/1: SMP build fix for entry-macro-multi.S The assembly code in entry-macro-multi.S does not build without the include asm/assembler.h in the case of 
CONFIG_SMP=y. Fixes the rather theoretical SMP build of mach-shmobile/entry-intc.c: arch/arm/include/asm/entry-macro-multi.S: Assembler messages: arch/arm/include/asm/entry-macro-multi.S:20: Error: bad instruction `alt_smp(test_for_ipi r0,r6,r5,lr)' arch/arm/include/asm/entry-macro-multi.S:20: Error: bad instruction `alt_up_b(9997f)' make[1]: *** [arch/arm/mach-shmobile/entry-intc.o] Error 1 make: *** [arch/arm/mach-shmobile] Error 2 make: *** Waiting for unfinished jobs.... Signed-off-by: Magnus Damm Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 4 ++++ arch/arm/include/asm/entry-macro-multi.S | 2 ++ 2 files changed, 6 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc2d2d75f70..65c3f2474f5 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -13,6 +13,9 @@ * Do not include any C declarations in this file - it is included by * assembler source. */ +#ifndef __ASM_ASSEMBLER_H__ +#define __ASM_ASSEMBLER_H__ + #ifndef __ASSEMBLY__ #error "Only include this from assembly code" #endif @@ -290,3 +293,4 @@ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort .endm +#endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index ec0bbf79c71..2da8547de6d 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -1,3 +1,5 @@ +#include + /* * Interrupt handling. Preserves r7, r8, r9 */ -- cgit v1.2.3-18-g5258 From 343fda59823ca20f48578f816ec12e741214f509 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 16 Jun 2011 08:26:06 +0100 Subject: ARM: 6962/1: mach-h720x: fix build error The h7201/h7202 machines did not build since they define ARM_DMA_ZONE_OFFSET but do not select ZONE_DMA. Fix it up by selecting ZONE_DMA in their Kconfig. Cc: Sascha Hauer Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-h720x/Kconfig | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig index 9b6982efbd2..abf356c0234 100644 --- a/arch/arm/mach-h720x/Kconfig +++ b/arch/arm/mach-h720x/Kconfig @@ -6,12 +6,14 @@ config ARCH_H7201 bool "gms30c7201" depends on ARCH_H720X select CPU_H7201 + select ZONE_DMA help Say Y here if you are using the Hynix GMS30C7201 Reference Board config ARCH_H7202 bool "hms30c7202" select CPU_H7202 + select ZONE_DMA depends on ARCH_H720X help Say Y here if you are using the Hynix HMS30C7202 Reference Board -- cgit v1.2.3-18-g5258 From 9a00318eadbb43db4e9c163c262a22a3c8b5a672 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 16 Jun 2011 12:09:37 +0100 Subject: ARM: 6963/1: Thumb-2: Relax relocation requirements for non-function symbols The "Thumb bit" of a symbol is only really meaningful for function symbols (STT_FUNC). However, sometimes a branch is relocated against a non-function symbol; for example, PC-relative branches to anonymous assembler local symbols are typically fixed up against the start-of-section symbol, which is not a function symbol. Some inline assembler generates references of this type, such as fixup code generated by macros in . The existing relocation code for R_ARM_THM_CALL/R_ARM_THM_JUMP24 interprets this case as an error, because the target symbol appears to be an ARM symbol; but this is really not the case, since the target symbol is just a base in these cases. 
The addend defines the precise offset to the target location, but since the addend is encoded in a non-interworking Thumb branch instruction, there is no explicit Thumb bit in the addend. Because these instructions never interwork, the implied Thumb bit in the addend is 1, and the destination is Thumb by definition. This patch removes the extraneous Thumb bit check for non-function symbols, enabling modules containing the affected relocation types to be loaded. No modification to the actual relocation code is required, since this code does not take bit[0] of the location->destination offset into account in any case. Function symbols are always checked for interworking conflicts, as before. Signed-off-by: Dave Martin Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/kernel/module.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index fee7c36349e..016d6a0830a 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset -= 0x02000000; offset += sym->st_value - loc; - /* only Thumb addresses allowed (no interworking) */ - if (!(offset & 1) || + /* + * For function symbols, only Thumb addresses are + * allowed (no interworking). + * + * For non-function symbols, the destination + * has no specific ARM/Thumb disposition, so + * the branch is resolved under the assumption + * that interworking is not required. + */ + if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC && + !(offset & 1)) || offset <= (s32)0xff000000 || offset >= (s32)0x01000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", -- cgit v1.2.3-18-g5258 From 650f156775c2638cc02ed7df31186a09ba79666a Mon Sep 17 00:00:00 2001 From: Jeff Ohlstein Date: Fri, 17 Jun 2011 13:55:38 -0700 Subject: msm: timer: compensate for timer shift in msm_read_timer_count Some msm targets have timers whose lower bits are unreliable. So, we present our timers as lower frequency than they actually are, and ignore the bottom 5 bits on such targets. This compensation was erroneously removed from the msm_read_timer_count function, so restore it. This was broken by 94790ec25 "msm: timer: SMP timer support for msm". Signed-off-by: Jeff Ohlstein --- arch/arm/mach-msm/timer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 9bfdd5ad244..2232032181b 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -102,7 +102,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs) { struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); - return readl(clk->global_counter); + /* + * Shift timer count down by a constant due to unreliable lower bits + * on some targets. + */ + return readl(clk->global_counter) >> clk->shift; } static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) -- cgit v1.2.3-18-g5258 From fdb9c3cd5124c9a6e4c824ed2bca5b4602e84a1a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 21 Apr 2011 23:09:11 +0000 Subject: msm: timer: Fix DGT rate on 8960 and 8660 The DGT runs at 27 MHz divided by 4 on 8660 and 8960. 
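To see what the shift compensation in the first timer patch above amounts
to, here is a plain arithmetic check in userspace C (not driver code).
DGT_HZ and MSM_DGT_SHIFT are the values visible in timer.c, the function is
a simplified stand-in for msm_read_timer_count(), and the sample counter
value is arbitrary: with a 19.2 MHz source and a shift of 5, the clock is
advertised as 600 kHz and the unreliable low 5 bits never reach the
clocksource core.

#include <stdio.h>
#include <stdint.h>

#define DGT_HZ        19200000u     /* 19.2 MHz, "600 KHz after shift" */
#define MSM_DGT_SHIFT 5

static uint32_t read_timer_count(uint32_t raw)  /* illustrative stand-in */
{
	return raw >> MSM_DGT_SHIFT;    /* discard the unreliable low bits */
}

int main(void)
{
	uint32_t raw = 0x12345;         /* pretend hardware counter sample */

	printf("advertised rate: %u Hz\n", (unsigned)(DGT_HZ >> MSM_DGT_SHIFT));
	printf("raw %#x reads back as %#x\n", (unsigned)raw,
	       (unsigned)read_timer_count(raw));
	return 0;
}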
Signed-off-by: Stephen Boyd Signed-off-by: David Brown --- arch/arm/mach-msm/timer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2232032181b..63621f152c9 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -57,10 +57,12 @@ enum timer_location { #if defined(CONFIG_ARCH_QSD8X50) #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ #define MSM_DGT_SHIFT (0) -#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \ - defined(CONFIG_ARCH_MSM8960) +#elif defined(CONFIG_ARCH_MSM7X30) #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ #define MSM_DGT_SHIFT (0) +#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) +#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */ +#define MSM_DGT_SHIFT (0) #else #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ #define MSM_DGT_SHIFT (5) -- cgit v1.2.3-18-g5258 From 5a5685525dbadbe31b8efb113c0d41be8cddda09 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jun 2011 04:33:13 +0000 Subject: ARM: mach-shmobile: mackerel: change usbhs devices order USB1 can use IRQ interrupt and notify function for usbhs driver, but USB0 is using polling for it. The priority of usbhs devices order USB1 > USB0 is good idea Signed-off-by: Kuninori Morimoto Reviewed-by: Simon Horman Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-mackerel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 7e1d3758432..3802f2afabe 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1287,9 +1287,9 @@ static struct platform_device *mackerel_devices[] __initdata = { &nor_flash_device, &smc911x_device, &lcdc_device, - &usbhs0_device, &usb1_host_device, &usbhs1_device, + &usbhs0_device, &leds_device, &fsi_device, &fsi_ak4643_device, -- cgit v1.2.3-18-g5258 From 9e05cdde0c6bb8c3c3ee12e6d6123c6f9f85eea6 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 20 Jun 2011 23:00:11 +0000 Subject: ARM: mach-shmobile: ag5evm: consistently name sdhi info structures Name the SDHI1 instance sh_sdhi1_info to be consistent with sh_sdhi0_info. 
Signed-off-by: Simon Horman Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-ag5evm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 1e2aba23e0d..ce5c2513c6c 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -381,7 +381,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) gpio_set_value(GPIO_PORT114, state); } -static struct sh_mobile_sdhi_info sh_sdhi1_platdata = { +static struct sh_mobile_sdhi_info sh_sdhi1_info = { .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, @@ -413,7 +413,7 @@ static struct platform_device sdhi1_device = { .name = "sh_mobile_sdhi", .id = 1, .dev = { - .platform_data = &sh_sdhi1_platdata, + .platform_data = &sh_sdhi1_info, }, .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, -- cgit v1.2.3-18-g5258 From 573619d165b85152eeddd3b3871002c48cd94e42 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 20 Jun 2011 16:46:01 +0100 Subject: ARM: SMP: wait for CPU to be marked active When we bring a CPU online, we should wait for it to become active before entering the idle thread, so we know that the scheduler and thread migration is going to work. Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 344e52b16c8..e7f92a4321f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void) smp_store_cpu_info(cpu); /* - * OK, now it's safe to let the boot CPU continue + * OK, now it's safe to let the boot CPU continue. Wait for + * the CPU migration code to notice that the CPU is online + * before we continue. */ set_cpu_online(cpu, true); + while (!cpu_active(cpu)) + cpu_relax(); /* * OK, it's off to the idle thread for us -- cgit v1.2.3-18-g5258 From 946a105e16651c35e9cc670bff23812761f1ad35 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 14 Jun 2011 14:20:44 +0100 Subject: ARM: 6961/1: zImage: Add build-time check for correctly-sized proc_type entries It is easy to mis-maintain the proc_types table such that the entries become wrongly-sized and misaligned when the kernel is built in Thumb-2. This patch adds an assembly-time check which will turn most common size/alignment mistakes in this table into build failures, to avoid having to debug the boot-time kernel hang which would happen if the resulting kernel were actually booted. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/boot/compressed/head.S | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 942fad97e44..940b2017810 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -597,6 +597,8 @@ __common_mmu_cache_on: sub pc, lr, r0, lsr #32 @ properly flush pipeline #endif +#define PROC_ENTRY_SIZE (4*5) + /* * Here follow the relocatable cache support functions for the * various processors. 
This is a generic hook for locating an @@ -624,7 +626,7 @@ call_cache_fn: adr r12, proc_types ARM( addeq pc, r12, r3 ) @ call cache function THUMB( addeq r12, r3 ) THUMB( moveq pc, r12 ) @ call cache function - add r12, r12, #4*5 + add r12, r12, #PROC_ENTRY_SIZE b 1b /* @@ -794,6 +796,16 @@ proc_types: .size proc_types, . - proc_types + /* + * If you get a "non-constant expression in ".if" statement" + * error from the assembler on this line, check that you have + * not accidentally written a "b" instruction where you should + * have written W(b). + */ + .if (. - proc_types) % PROC_ENTRY_SIZE != 0 + .error "The size of one or more proc_types entries is wrong." + .endif + /* * Turn off the Cache and MMU. ARMv3 does not support * reading the control register, but ARMv4 does. -- cgit v1.2.3-18-g5258 From 082763a80ad11adbe8418fff1cfe466343035b7a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 20 Jun 2011 08:13:00 +0100 Subject: ARM: 6969/1: plat-iop: fix build error The iop13xx_defconfig didn't build since the platform code uses defines from . Simply add the include so it compiles. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/plat-iop/cp6.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c index 9612a87e2a8..bab73e2c79d 100644 --- a/arch/arm/plat-iop/cp6.c +++ b/arch/arm/plat-iop/cp6.c @@ -18,6 +18,7 @@ */ #include #include +#include static int cp6_trap(struct pt_regs *regs, unsigned int instr) { -- cgit v1.2.3-18-g5258 From 7a0ee92b4a510bc2dd026333f90031e883e0cde0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 23 Jun 2011 22:00:20 +0100 Subject: ARM: pm: proc-v7: fix missing struct processor pointers for suspend code Add the missing suspend/resume pointers for the suspend code. This is needed when building for multiple CPUs. Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3c3867850a3..e27c011a759 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -418,9 +418,9 @@ ENTRY(v7_processor_functions) .word cpu_v7_dcache_clean_area .word cpu_v7_switch_mm .word cpu_v7_set_pte_ext - .word 0 - .word 0 - .word 0 + .word cpu_v7_suspend_size + .word cpu_v7_do_suspend + .word cpu_v7_do_resume .size v7_processor_functions, . - v7_processor_functions .section ".rodata" -- cgit v1.2.3-18-g5258 From 111b20d01346b9635b3223c7af4e40e43bee8dc6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 15:41:58 +0100 Subject: ARM: pm: ensure ARMv7 CPUs save and restore the TLS register Ensure that the TLS register is saved and restored over a suspend cycle, so that userspace programs don't see a corrupted TLS value. 
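The diff that follows grows cpu_v7_suspend_size from 4 * 8 to 4 * 9 bytes
because one more word, the user read-only thread ID register, joins the
save list. A small userspace sketch of why the declared size must track
that list (the constants mirror the patch, everything else is illustrative):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define SAVED_WORDS  9                          /* was 8 before the TLS register was added */
#define SUSPEND_SIZE (4 * SAVED_WORDS)          /* bytes, mirrors cpu_v7_suspend_size */

int main(void)
{
	uint32_t save_area[SAVED_WORDS];
	unsigned int n;

	/* "suspend": one word per register named in cpu_v7_do_suspend */
	for (n = 0; n < SAVED_WORDS; n++)
		save_area[n] = n;               /* placeholder register values */

	/* the declared size must cover every word the resume path reads back */
	assert(n * sizeof(save_area[0]) == SUSPEND_SIZE);
	printf("saved %u words (%u bytes), last value %u\n", n,
	       (unsigned)(n * sizeof(save_area[0])),
	       (unsigned)save_area[SAVED_WORDS - 1]);
	return 0;
}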
Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mm/proc-v7.S | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index e27c011a759..089c0b5e454 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -210,19 +210,21 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size -.equ cpu_v7_suspend_size, 4 * 8 +.equ cpu_v7_suspend_size, 4 * 9 #ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID + stmia r0!, {r4 - r6} mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 0 @ TTB 0 mrc p15, 0, r8, c2, c0, 1 @ TTB 1 mrc p15, 0, r9, c1, c0, 0 @ Control register mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control - stmia r0, {r4 - r11} + stmia r0, {r6 - r11} ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_v7_do_suspend) @@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache - ldmia r0, {r4 - r11} + ldmia r0!, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID + ldmia r0, {r6 - r11} mcr p15, 0, r6, c3, c0, 0 @ Domain ID mcr p15, 0, r7, c2, c0, 0 @ TTB 0 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 -- cgit v1.2.3-18-g5258 From b69874e4f530b0103e507f695c010d00cb85a4df Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 18:57:31 +0100 Subject: ARM: pm: arrange for cpu_proc_init() to be called on resume cpu_proc_init() does processor specific initialization, which we do at boot time. We have been omitting to do this on resume, which causes some of this initialization to be skipped. We've also been skipping this on SMP initialization too. Ensure that cpu_proc_init() is always called appropriately by moving it into cpu_init(), and move cpu_init() to a more appropriate point in the boot initialization. Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 99 ++++++++++++++++++++++++----------------------- arch/arm/mm/proc-sa1100.S | 4 +- 2 files changed, 51 insertions(+), 52 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05..edcab02be64 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -342,54 +342,6 @@ static void __init feat_v6_fixup(void) elf_hwcap &= ~HWCAP_TLS; } -static void __init setup_processor(void) -{ - struct proc_info_list *list; - - /* - * locate processor in the list of supported processor - * types. 
The linker builds this table for us from the - * entries in arch/arm/mm/proc-*.S - */ - list = lookup_processor_type(read_cpuid_id()); - if (!list) { - printk("CPU configuration botched (ID %08x), unable " - "to continue.\n", read_cpuid_id()); - while (1); - } - - cpu_name = list->cpu_name; - -#ifdef MULTI_CPU - processor = *list->proc; -#endif -#ifdef MULTI_TLB - cpu_tlb = *list->tlb; -#endif -#ifdef MULTI_USER - cpu_user = *list->user; -#endif -#ifdef MULTI_CACHE - cpu_cache = *list->cache; -#endif - - printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", - cpu_name, read_cpuid_id(), read_cpuid_id() & 15, - proc_arch[cpu_architecture()], cr_alignment); - - sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); - sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); - elf_hwcap = list->elf_hwcap; -#ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; -#endif - - feat_v6_fixup(); - - cacheid_init(); - cpu_proc_init(); -} - /* * cpu_init - initialise one CPU. * @@ -405,6 +357,8 @@ void cpu_init(void) BUG(); } + cpu_proc_init(); + /* * Define the placement constraint for the inline asm directive below. * In Thumb-2, msr with an immediate value is not allowed. @@ -441,6 +395,54 @@ void cpu_init(void) : "r14"); } +static void __init setup_processor(void) +{ + struct proc_info_list *list; + + /* + * locate processor in the list of supported processor + * types. The linker builds this table for us from the + * entries in arch/arm/mm/proc-*.S + */ + list = lookup_processor_type(read_cpuid_id()); + if (!list) { + printk("CPU configuration botched (ID %08x), unable " + "to continue.\n", read_cpuid_id()); + while (1); + } + + cpu_name = list->cpu_name; + +#ifdef MULTI_CPU + processor = *list->proc; +#endif +#ifdef MULTI_TLB + cpu_tlb = *list->tlb; +#endif +#ifdef MULTI_USER + cpu_user = *list->user; +#endif +#ifdef MULTI_CACHE + cpu_cache = *list->cache; +#endif + + printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", + cpu_name, read_cpuid_id(), read_cpuid_id() & 15, + proc_arch[cpu_architecture()], cr_alignment); + + sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); + sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); + elf_hwcap = list->elf_hwcap; +#ifndef CONFIG_ARM_THUMB + elf_hwcap &= ~HWCAP_THUMB; +#endif + + feat_v6_fixup(); + + cacheid_init(); + cpu_init(); +} + void __init dump_machine_table(void) { struct machine_desc *p; @@ -913,7 +915,6 @@ void __init setup_arch(char **cmdline_p) #endif reserve_crashkernel(); - cpu_init(); tcm_init(); #ifdef CONFIG_MULTI_IRQ_HANDLER diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c997e3..e9c47271732 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -34,7 +34,7 @@ */ #define DCACHELINESIZE 32 - __INIT + .section .text /* * cpu_sa1100_proc_init() @@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland mov pc, lr - .section .text - /* * cpu_sa1100_proc_fin() * -- cgit v1.2.3-18-g5258 From 3125af241cdb5a2421aad9f710b2744228a79084 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 18:59:33 +0100 Subject: ARM: pm: sa1100: no need to re-enable clock switching This is now taken care of by calling cpu_proc_init() in the resume path, so eliminate this unnecessary call. 
Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mach-sa1100/sleep.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 04f2a618d4e..122ab3c6692 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S @@ -147,5 +147,4 @@ sa1110_sdram_controller_fix: */ .align 5 sa1100_cpu_resume: - mcr p15, 0, r1, c15, c1, 2 @ enable clock switching ldmfd sp!, {r4 - r12, pc} @ return to caller -- cgit v1.2.3-18-g5258 From 6b5f6ab0e1c33beaed828271f13c03ed02ee3c15 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 13:39:44 +0100 Subject: ARM: pm: make MULTI_CPU and !MULTI_CPU resume paths the same Eliminate the differences between MULTI_CPU and non-MULTI_CPU resume paths, making the saved structure identical irrespective of the way the kernel was configured. Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 6398ead9d1c..97a6577aa61 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -40,9 +40,11 @@ ENTRY(cpu_suspend) #else mov r2, sp @ current virtual SP ldr r0, =cpu_suspend_size + ldr ip, =cpu_do_resume sub sp, sp, r0 @ allocate CPU state on stack mov r0, sp @ save pointer - stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn + add ip, ip, r1 @ convert resume fn to phys + stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn ldr r3, =sleep_save_sp add r2, sp, r1 @ convert SP to phys #ifdef CONFIG_SMP @@ -120,20 +122,12 @@ ENTRY(cpu_resume) ldr r0, sleep_save_sp @ stack phys addr #endif setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off -#ifdef MULTI_CPU @ load v:p, stack, return fn, resume fn ARM( ldmia r0!, {r1, sp, lr, pc} ) THUMB( ldmia r0!, {r1, r2, r3, r4} ) THUMB( mov sp, r2 ) THUMB( mov lr, r3 ) THUMB( bx r4 ) -#else - @ load v:p, stack, return fn - ARM( ldmia r0!, {r1, sp, lr} ) -THUMB( ldmia r0!, {r1, r2, lr} ) -THUMB( mov sp, r2 ) - b cpu_do_resume -#endif ENDPROC(cpu_resume) sleep_save_sp: -- cgit v1.2.3-18-g5258 From 2fefbcd58590cf33189c6178098e12b31b994b5f Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 13:45:34 +0100 Subject: ARM: pm: move return address (for cpu_resume) to top of stack Move the return address for cpu_resume to the top of stack so that cpu_resume looks more like a normal function. 
Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 97a6577aa61..f8e92513c1b 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -15,6 +15,7 @@ * r0-r3,r9,r10,lr corrupted */ ENTRY(cpu_suspend) + stmfd sp!, {r3} mov r9, lr #ifdef MULTI_CPU ldr r10, =processor @@ -24,7 +25,7 @@ ENTRY(cpu_suspend) sub sp, sp, r0 @ allocate CPU state on stack mov r0, sp @ save pointer add ip, ip, r1 @ convert resume fn to phys - stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn + stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn ldr r3, =sleep_save_sp add r2, sp, r1 @ convert SP to phys #ifdef CONFIG_SMP @@ -44,7 +45,7 @@ ENTRY(cpu_suspend) sub sp, sp, r0 @ allocate CPU state on stack mov r0, sp @ save pointer add ip, ip, r1 @ convert resume fn to phys - stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn + stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn ldr r3, =sleep_save_sp add r2, sp, r1 @ convert SP to phys #ifdef CONFIG_SMP @@ -99,7 +100,7 @@ ENDPROC(cpu_resume_turn_mmu_on) cpu_resume_after_mmu: str r5, [r2, r4, lsl #2] @ restore old mapping mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache - mov pc, lr + ldmfd sp!, {pc} ENDPROC(cpu_resume_after_mmu) /* @@ -122,12 +123,11 @@ ENTRY(cpu_resume) ldr r0, sleep_save_sp @ stack phys addr #endif setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off - @ load v:p, stack, return fn, resume fn - ARM( ldmia r0!, {r1, sp, lr, pc} ) -THUMB( ldmia r0!, {r1, r2, r3, r4} ) + @ load v:p, stack, resume fn + ARM( ldmia r0!, {r1, sp, pc} ) +THUMB( ldmia r0!, {r1, r2, r3} ) THUMB( mov sp, r2 ) -THUMB( mov lr, r3 ) -THUMB( bx r4 ) +THUMB( bx r3 ) ENDPROC(cpu_resume) sleep_save_sp: -- cgit v1.2.3-18-g5258 From 3fd431bd0cbc75a506b90b42619de3b04fe813a7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 13:53:06 +0100 Subject: ARM: pm: extract common code from MULTI_CPU/!MULTI_CPU paths Very little code is different between these two paths now, so extract the common code. 
Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index f8e92513c1b..0a778c30859 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -19,29 +19,13 @@ ENTRY(cpu_suspend) mov r9, lr #ifdef MULTI_CPU ldr r10, =processor - mov r2, sp @ current virtual SP ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function - sub sp, sp, r0 @ allocate CPU state on stack - mov r0, sp @ save pointer - add ip, ip, r1 @ convert resume fn to phys - stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn - ldr r3, =sleep_save_sp - add r2, sp, r1 @ convert SP to phys -#ifdef CONFIG_SMP - ALT_SMP(mrc p15, 0, lr, c0, c0, 5) - ALT_UP(mov lr, #0) - and lr, lr, #15 - str r2, [r3, lr, lsl #2] @ save phys SP #else - str r2, [r3] @ save phys SP -#endif - mov lr, pc - ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state -#else - mov r2, sp @ current virtual SP ldr r0, =cpu_suspend_size ldr ip, =cpu_do_resume +#endif + mov r2, sp @ current virtual SP sub sp, sp, r0 @ allocate CPU state on stack mov r0, sp @ save pointer add ip, ip, r1 @ convert resume fn to phys @@ -56,6 +40,10 @@ ENTRY(cpu_suspend) #else str r2, [r3] @ save phys SP #endif +#ifdef MULTI_CPU + mov lr, pc + ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state +#else bl cpu_do_suspend #endif -- cgit v1.2.3-18-g5258 From 5fa94c812c0001ac7c3d8868e956ec514734a352 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 15:04:14 +0100 Subject: ARM: pm: preserve r4 - r11 across a suspend Make cpu_suspend()..return function preserve r4 to r11 across a suspend cycle. This is in preparation of relieving platform support code from this task. Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 0a778c30859..8dbca93417f 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -12,10 +12,11 @@ * r1 = v:p offset * r3 = virtual return function * Note: sp is decremented to allocate space for CPU state on stack - * r0-r3,r9,r10,lr corrupted + * r0-r3,ip,lr corrupted */ ENTRY(cpu_suspend) stmfd sp!, {r3} + stmfd sp!, {r4 - r11} mov r9, lr #ifdef MULTI_CPU ldr r10, =processor @@ -88,7 +89,7 @@ ENDPROC(cpu_resume_turn_mmu_on) cpu_resume_after_mmu: str r5, [r2, r4, lsl #2] @ restore old mapping mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache - ldmfd sp!, {pc} + ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) /* -- cgit v1.2.3-18-g5258 From 8111eaa6d424ab3ba8a4d7a3148d4681ae5c6ae3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 15:25:11 +0100 Subject: ARM: pm: reallocate registers to avoid r2, r3 Avoid using r2 and r3 in the suspend code, allowing these to be passed further into the function as arguments. 
Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 8dbca93417f..358be13499d 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -20,26 +20,26 @@ ENTRY(cpu_suspend) mov r9, lr #ifdef MULTI_CPU ldr r10, =processor - ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state + ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function #else - ldr r0, =cpu_suspend_size + ldr r5, =cpu_suspend_size ldr ip, =cpu_do_resume #endif - mov r2, sp @ current virtual SP - sub sp, sp, r0 @ allocate CPU state on stack + mov r6, sp @ current virtual SP + sub sp, sp, r5 @ allocate CPU state on stack mov r0, sp @ save pointer add ip, ip, r1 @ convert resume fn to phys - stmfd sp!, {r1, r2, ip} @ save v:p, virt SP, phys resume fn - ldr r3, =sleep_save_sp - add r2, sp, r1 @ convert SP to phys + stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn + ldr r5, =sleep_save_sp + add r6, sp, r1 @ convert SP to phys #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, lr, c0, c0, 5) ALT_UP(mov lr, #0) and lr, lr, #15 - str r2, [r3, lr, lsl #2] @ save phys SP + str r6, [r5, lr, lsl #2] @ save phys SP #else - str r2, [r3] @ save phys SP + str r6, [r5] @ save phys SP #endif #ifdef MULTI_CPU mov lr, pc -- cgit v1.2.3-18-g5258 From 3799bbe57843d279008c9ec3406838966cad5f15 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 15:28:40 +0100 Subject: ARM: pm: rejig suspend follow-on function calling convention Save the suspend function pointer onto the stack for use when returning. Allocate r2 to pass an argument to the suspend function. Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 358be13499d..b924bcc32dc 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -10,6 +10,7 @@ /* * Save CPU state for a suspend * r1 = v:p offset + * r2 = suspend function arg0 * r3 = virtual return function * Note: sp is decremented to allocate space for CPU state on stack * r0-r3,ip,lr corrupted @@ -17,7 +18,6 @@ ENTRY(cpu_suspend) stmfd sp!, {r3} stmfd sp!, {r4 - r11} - mov r9, lr #ifdef MULTI_CPU ldr r10, =processor ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state @@ -33,6 +33,7 @@ ENTRY(cpu_suspend) stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn ldr r5, =sleep_save_sp add r6, sp, r1 @ convert SP to phys + stmfd sp!, {r2, lr} @ save suspend func arg and pointer #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, lr, c0, c0, 5) ALT_UP(mov lr, #0) @@ -51,12 +52,12 @@ ENTRY(cpu_suspend) @ flush data cache #ifdef MULTI_CACHE ldr r10, =cpu_cache - mov lr, r9 + mov lr, pc ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] #else - mov lr, r9 - b __cpuc_flush_kern_all + bl __cpuc_flush_kern_all #endif + ldmfd sp!, {r0, pc} @ call suspend fn ENDPROC(cpu_suspend) .ltorg -- cgit v1.2.3-18-g5258 From dbc125168fd7dda4ffb24a29548746c7bd3b3d87 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 15:52:47 +0100 Subject: ARM: pm: move sa1100 to use proper suspend func arg0 In the previous commit, we introduced an official way to supply an argument to the suspend function. 
Convert the sa1100 suspend code to use this method. Signed-off-by: Russell King --- arch/arm/mach-pxa/sleep.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 6f5368899d8..613ddfa2c29 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S @@ -59,7 +59,7 @@ ENTRY(pxa27x_cpu_suspend) mra r2, r3, acc0 #endif stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r4, r0 @ save sleep mode + mov r2, r0 @ save sleep mode ldr r3, =pxa_cpu_resume @ resume function bl cpu_suspend @@ -67,7 +67,7 @@ ENTRY(pxa27x_cpu_suspend) @ (also workaround for sighting 28071) @ prepare value for sleep mode - mov r1, r4 @ sleep mode + mov r1, r0 @ sleep mode @ prepare pointer to physical address 0 (virtual mapping in generic.c) mov r2, #UNCACHED_PHYS_0 @@ -109,11 +109,11 @@ ENTRY(pxa27x_cpu_suspend) ENTRY(pxa25x_cpu_suspend) stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r4, r0 @ save sleep mode + mov r2, r0 @ save sleep mode ldr r3, =pxa_cpu_resume @ resume function bl cpu_suspend @ prepare value for sleep mode - mov r1, r4 @ sleep mode + mov r1, r0 @ sleep mode @ prepare pointer to physical address 0 (virtual mapping in generic.c) mov r2, #UNCACHED_PHYS_0 -- cgit v1.2.3-18-g5258 From e8856a8797e76e6883ae81f8f9ecbb231cc535df Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 15:58:34 +0100 Subject: ARM: pm: convert cpu_suspend() to a normal function cpu_suspend() has a weird calling method which makes it only possible to call from assembly code: it returns with a modified stack pointer to finish the suspend, but on resume, it 'returns' via a provided pointer. We can make cpu_suspend() appear to be a normal function merely by swapping the resume pointer argument and the link register. Do so, and update all callers to take account of this more traditional behaviour. 
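A rough userspace analogy for the calling convention this change gives
cpu_suspend(): the caller passes an argument and a "finish" function,
cpu_suspend saves state and hands control to that function (which never
returns by itself), and when the system resumes the original call appears
to return normally. The real prototype added here takes four arguments,
void cpu_suspend(int, long, unsigned long, void (*)(unsigned long)); the
sketch collapses that to two and uses setjmp/longjmp purely as an
illustration, so none of it is kernel code:

#include <stdio.h>
#include <setjmp.h>

static jmp_buf resume_ctx;              /* stand-in for the saved CPU state */

static void finish_suspend(unsigned long arg)
{
	printf("entering low-power mode, arg=%lu\n", arg);
	/* pretend the wake-up path jumps back to the saved context */
	longjmp(resume_ctx, 1);
}

static void fake_cpu_suspend(unsigned long arg, void (*fn)(unsigned long))
{
	if (setjmp(resume_ctx) == 0)
		fn(arg);                /* does not return by itself */
	/* "resume": state restored, fall through and return like a normal call */
}

int main(void)
{
	fake_cpu_suspend(42, finish_suspend);
	puts("resumed: the suspend call returned like any other function");
	return 0;
}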
Acked-by: Frank Hofmann Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 2 ++ arch/arm/kernel/sleep.S | 10 ++++------ arch/arm/mach-exynos4/sleep.S | 7 +++---- arch/arm/mach-pxa/sleep.S | 13 ++++++++++--- arch/arm/mach-s3c64xx/sleep.S | 9 +++------ arch/arm/mach-s5pv210/sleep.S | 7 +++---- arch/arm/mach-sa1100/sleep.S | 13 +++---------- arch/arm/plat-s3c24xx/sleep.S | 10 +++------- 8 files changed, 31 insertions(+), 40 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 832888d0c20..50be6055df8 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -106,6 +106,8 @@ extern void __show_regs(struct pt_regs *); extern int cpu_architecture(void); extern void cpu_init(void); +extern void cpu_suspend(int, long, unsigned long, void (*)(unsigned long)); +extern void cpu_resume(void); void arm_machine_restart(char mode, const char *cmd); extern void (*arm_pm_restart)(char str, const char *cmd); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index b924bcc32dc..e0626779fe9 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -11,13 +11,11 @@ * Save CPU state for a suspend * r1 = v:p offset * r2 = suspend function arg0 - * r3 = virtual return function - * Note: sp is decremented to allocate space for CPU state on stack - * r0-r3,ip,lr corrupted + * r3 = suspend function + * Note: does not return until system resumes */ ENTRY(cpu_suspend) - stmfd sp!, {r3} - stmfd sp!, {r4 - r11} + stmfd sp!, {r4 - r11, lr} #ifdef MULTI_CPU ldr r10, =processor ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state @@ -33,7 +31,7 @@ ENTRY(cpu_suspend) stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn ldr r5, =sleep_save_sp add r6, sp, r1 @ convert SP to phys - stmfd sp!, {r2, lr} @ save suspend func arg and pointer + stmfd sp!, {r2, r3} @ save suspend func arg and pointer #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, lr, c0, c0, 5) ALT_UP(mov lr, #0) diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index 6b62425417a..d9a2287b464 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -42,16 +42,15 @@ ENTRY(s3c_cpu_save) stmfd sp!, { r3 - r12, lr } - ldr r3, =resume_with_mmu + adr r3, BSYM(exynos4_finish_suspend) bl cpu_suspend + ldmfd sp!, { r3 - r12, pc } +exynos4_finish_suspend: ldr r0, =pm_cpu_sleep ldr r0, [ r0 ] mov pc, r0 -resume_with_mmu: - ldmfd sp!, { r3 - r12, pc } - .ltorg /* diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 613ddfa2c29..3a67887e6db 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S @@ -35,9 +35,11 @@ ENTRY(pxa3xx_cpu_suspend) #endif stmfd sp!, {r2 - r12, lr} @ save registers on stack mov r1, r0 - ldr r3, =pxa_cpu_resume @ resume function + adr r3, BSYM(pxa3xx_finish_suspend) bl cpu_suspend + b pxa_cpu_resume +pxa3xx_finish_suspend: mov r0, #0x06 @ S2D3C4 mode mcr p14, 0, r0, c7, c0, 0 @ enter sleep @@ -60,9 +62,11 @@ ENTRY(pxa27x_cpu_suspend) #endif stmfd sp!, {r2 - r12, lr} @ save registers on stack mov r2, r0 @ save sleep mode - ldr r3, =pxa_cpu_resume @ resume function + adr r3, BSYM(pxa27x_finish_suspend) bl cpu_suspend + b pxa_cpu_resume +pxa27x_finish_suspend: @ Put the processor to sleep @ (also workaround for sighting 28071) @@ -110,8 +114,11 @@ ENTRY(pxa27x_cpu_suspend) ENTRY(pxa25x_cpu_suspend) stmfd sp!, {r2 - r12, lr} @ save registers on stack mov r2, r0 @ save sleep mode - ldr r3, =pxa_cpu_resume @ 
resume function + adr r3, BSYM(pxa25x_finish_suspend) bl cpu_suspend + b pxa_cpu_resume + +pxa25x_finish_suspend: @ prepare value for sleep mode mov r1, r0 @ sleep mode diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index 1f87732b232..dc4f5820210 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S @@ -36,18 +36,15 @@ ENTRY(s3c_cpu_save) stmfd sp!, { r4 - r12, lr } - ldr r3, =resume_with_mmu + adr r3, BSYM(s3c64xx_finish_suspend) bl cpu_suspend + ldmfd sp!, { r4 - r12, pc } +s3c64xx_finish_suspend: @@ call final suspend code ldr r0, =pm_cpu_sleep ldr pc, [r0] - @@ return to the caller, after the MMU is turned on. - @@ restore the last bits of the stack and return. -resume_with_mmu: - ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save - /* Sleep magic, the word before the resume entry point so that the * bootloader can check for a resumeable image. */ diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index a3d649466fb..1182fc848a7 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S @@ -41,16 +41,15 @@ ENTRY(s3c_cpu_save) stmfd sp!, { r3 - r12, lr } - ldr r3, =resume_with_mmu + adr r3, BSYM(s5pv210_finish_suspend) bl cpu_suspend + ldmfd sp!, { r3 - r12, pc } +s5pv210_finish_suspend: ldr r0, =pm_cpu_sleep ldr r0, [ r0 ] mov pc, r0 -resume_with_mmu: - ldmfd sp!, { r3 - r12, pc } - .ltorg /* sleep magic, to allow the bootloader to check for an valid diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 122ab3c6692..f3fe39773f8 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S @@ -31,9 +31,11 @@ ENTRY(sa1100_cpu_suspend) stmfd sp!, {r4 - r12, lr} @ save registers on stack mov r1, r0 - ldr r3, =sa1100_cpu_resume @ return function + adr r3, BSYM(sa1100_finish_suspend) bl cpu_suspend + ldmfd sp!, {r4 - r12, pc} @ return to caller +sa1100_finish_suspend: @ disable clock switching mcr p15, 0, r1, c15, c2, 2 @@ -139,12 +141,3 @@ sa1110_sdram_controller_fix: str r13, [r12] 20: b 20b @ loop waiting for sleep - -/* - * cpu_sa1100_resume() - * - * entry point from bootloader into kernel during resume - */ - .align 5 -sa1100_cpu_resume: - ldmfd sp!, {r4 - r12, pc} @ return to caller diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index fd7032f84ae..f822e6282dd 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S @@ -49,21 +49,17 @@ ENTRY(s3c_cpu_save) stmfd sp!, { r4 - r12, lr } - ldr r3, =resume_with_mmu + adr r3, BSYM(s3c24xx_finish_suspend) bl cpu_suspend + ldmfd sp!, { r4 - r12, pc } +s3c24xx_finish_suspend: @@ jump to final code to send system to sleep ldr r0, =pm_cpu_sleep @@ldr pc, [ r0 ] ldr r0, [ r0 ] mov pc, r0 - @@ return to the caller, after having the MMU - @@ turned on, this restores the last bits from the - @@ stack -resume_with_mmu: - ldmfd sp!, { r4 - r12, pc } - .ltorg /* sleep magic, to allow the bootloader to check for an valid -- cgit v1.2.3-18-g5258 From 14cd8fd574bce1cfbe510ccb1f73c7c1024d770f Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 16:32:58 +0100 Subject: ARM: pm: move cpu_init() call into core code As we have core code dealing with CPU suspend/resume, we can re-initialize the CPUs exception banked registers via that code rather than having platforms deal with that level of detail. So, move the call to cpu_init() out of platform code into core code. 
Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/kernel/sleep.S | 1 + arch/arm/mach-pxa/pm.c | 1 - arch/arm/mach-sa1100/pm.c | 2 -- arch/arm/plat-samsung/pm.c | 4 ---- 4 files changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index e0626779fe9..53922748d10 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -88,6 +88,7 @@ ENDPROC(cpu_resume_turn_mmu_on) cpu_resume_after_mmu: str r5, [r2, r4, lsl #2] @ restore old mapping mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache + bl cpu_init @ restore the und/abt/irq banked regs ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 51e1583265b..37178a8559b 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state) /* *** go zzz *** */ pxa_cpu_pm_fns->enter(state); - cpu_init(); if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { /* after sleeping, validate the checksum */ diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c4661aab22f..d35885ca97a 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -77,8 +77,6 @@ static int sa11x0_pm_enter(suspend_state_t state) /* go zzz */ sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); - cpu_init(); - /* * Ensure not to come back here if it wasn't intended */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 5c0a440d6e1..3828191416b 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -300,10 +300,6 @@ static int s3c_pm_enter(suspend_state_t state) s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); - /* restore the cpu state using the kernel's cpu init code. */ - - cpu_init(); - /* restore the system state */ s3c_pm_restore_core(); -- cgit v1.2.3-18-g5258 From 34c79de6b2ea5bc5734d970851fb966b49d55a17 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 16:26:29 +0100 Subject: ARM: pm: sa1100: move cpu_suspend into C code We don't need a veneer for cpu_suspend, it can be called directly from C code now. Move it into sa11x0_pm_enter() along with the re-enabling of clock switching. 
Signed-off-by: Russell King --- arch/arm/mach-sa1100/pm.c | 4 ++-- arch/arm/mach-sa1100/sleep.S | 11 ++--------- 2 files changed, 4 insertions(+), 11 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index d35885ca97a..259ed3bcc3f 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -32,7 +32,7 @@ #include #include -extern void sa1100_cpu_suspend(long); +extern void sa1100_finish_suspend(unsigned long); #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] @@ -75,7 +75,7 @@ static int sa11x0_pm_enter(suspend_state_t state) PSPR = virt_to_phys(cpu_resume); /* go zzz */ - sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, sa1100_finish_suspend); /* * Ensure not to come back here if it wasn't intended diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index f3fe39773f8..e8223315b44 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S @@ -22,20 +22,13 @@ .text /* - * sa1100_cpu_suspend() + * sa1100_finish_suspend() * * Causes sa11x0 to enter sleep state * */ -ENTRY(sa1100_cpu_suspend) - stmfd sp!, {r4 - r12, lr} @ save registers on stack - mov r1, r0 - adr r3, BSYM(sa1100_finish_suspend) - bl cpu_suspend - ldmfd sp!, {r4 - r12, pc} @ return to caller - -sa1100_finish_suspend: +ENTRY(sa1100_finish_suspend) @ disable clock switching mcr p15, 0, r1, c15, c2, 2 -- cgit v1.2.3-18-g5258 From c4ac82c07dd3e4fb5d65a52ffa94e302f80b609d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 16:03:19 +0100 Subject: ARM: pm: plat-s3c24xx: cleanup s3c_cpu_save s3c_cpu_save does not need to save any registers with the new cpu_suspend calling convention. Remove these redundant instructions. Tested-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/plat-s3c24xx/sleep.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index f822e6282dd..6ada459498a 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S @@ -48,10 +48,8 @@ */ ENTRY(s3c_cpu_save) - stmfd sp!, { r4 - r12, lr } adr r3, BSYM(s3c24xx_finish_suspend) - bl cpu_suspend - ldmfd sp!, { r4 - r12, pc } + b cpu_suspend s3c24xx_finish_suspend: @@ jump to final code to send system to sleep -- cgit v1.2.3-18-g5258 From 3d32ead3f16f054fa7f4a85a34eb12c36fd76269 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 16:10:16 +0100 Subject: ARM: pm: mach-s5pv210: cleanup s3c_cpu_save s3c_cpu_save does not need to save any registers with the new cpu_suspend calling convention. Remove these redundant instructions. 
Signed-off-by: Russell King --- arch/arm/mach-s5pv210/sleep.S | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index 1182fc848a7..28dfeff342b 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S @@ -39,11 +39,8 @@ */ ENTRY(s3c_cpu_save) - - stmfd sp!, { r3 - r12, lr } adr r3, BSYM(s5pv210_finish_suspend) - bl cpu_suspend - ldmfd sp!, { r3 - r12, pc } + b cpu_suspend s5pv210_finish_suspend: ldr r0, =pm_cpu_sleep -- cgit v1.2.3-18-g5258 From 769783a9300d47236766345cc98ce963122c2e4f Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 16:12:27 +0100 Subject: ARM: pm: mach-exynos4: cleanup s3c_cpu_save s3c_cpu_save does not need to save any registers with the new cpu_suspend calling convention. Remove these redundant instructions. Signed-off-by: Russell King --- arch/arm/mach-exynos4/sleep.S | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index d9a2287b464..b56a4914985 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -40,11 +40,8 @@ */ ENTRY(s3c_cpu_save) - - stmfd sp!, { r3 - r12, lr } adr r3, BSYM(exynos4_finish_suspend) - bl cpu_suspend - ldmfd sp!, { r3 - r12, pc } + b cpu_suspend exynos4_finish_suspend: ldr r0, =pm_cpu_sleep -- cgit v1.2.3-18-g5258 From 4d01446fea61a32b6755ae1e11314ffca744dcaa Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 13 Jun 2011 16:33:30 +0100 Subject: ARM: pm: mach-s3c64xx: cleanup s3c_cpu_save s3c_cpu_save does not need to save any registers with the new cpu_suspend calling convention. Remove these redundant instructions. Acked-by: Frank Hofmann Signed-off-by: Russell King --- arch/arm/mach-s3c64xx/sleep.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index dc4f5820210..2fd34333283 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S @@ -35,10 +35,8 @@ */ ENTRY(s3c_cpu_save) - stmfd sp!, { r4 - r12, lr } adr r3, BSYM(s3c64xx_finish_suspend) - bl cpu_suspend - ldmfd sp!, { r4 - r12, pc } + b cpu_suspend s3c64xx_finish_suspend: @@ call final suspend code -- cgit v1.2.3-18-g5258 From e7089da9567fa8da37e35e1f81a5e3579d0d582d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 19:29:26 +0100 Subject: ARM: pm: samsung: move cpu_suspend into C code Move the call to cpu_suspend into C code, and noticing that all the s3c_cpu_save implementations are now identical, we can move this into the common samsung code. 
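The pattern that each of these platforms converges on looks roughly like the sketch below. The fragment is illustrative only and is not part of the patch: the my_soc_* names are invented, while the cpu_suspend() prototype, the leading 0, and the PHYS_OFFSET - PAGE_OFFSET argument are taken from the patches in this series. At this stage the full four-argument form is still exposed; a later patch in the series hides the first two arguments behind a static inline wrapper.

	#include <linux/suspend.h>
	#include <asm/system.h>		/* cpu_suspend() prototype at this stage */
	#include <asm/memory.h>		/* PHYS_OFFSET, PAGE_OFFSET */
	#include <asm/proc-fns.h>	/* cpu_do_idle() */

	/* The "finisher" runs after the core code has saved the CPU state and
	 * flushed the caches; it puts the SoC to sleep and normally does not
	 * return - resume re-enters the kernel via cpu_resume(). */
	static void my_soc_finish_suspend(unsigned long arg)
	{
		cpu_do_idle();
	}

	static int my_soc_pm_enter(suspend_state_t state)
	{
		/* arguments: 0, v:p offset, finisher argument, finisher function.
		 * Execution continues here once the saved state is restored. */
		cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, my_soc_finish_suspend);
		return 0;
	}
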
Signed-off-by: Russell King --- arch/arm/mach-exynos4/pm.c | 2 +- arch/arm/mach-exynos4/sleep.S | 18 ------------------ arch/arm/mach-s3c2412/pm.c | 2 +- arch/arm/mach-s3c2416/pm.c | 2 +- arch/arm/mach-s3c64xx/pm.c | 2 +- arch/arm/mach-s3c64xx/sleep.S | 18 ------------------ arch/arm/mach-s5pv210/pm.c | 2 +- arch/arm/mach-s5pv210/sleep.S | 17 ----------------- arch/arm/plat-s3c24xx/sleep.S | 19 ------------------- arch/arm/plat-samsung/include/plat/pm.h | 5 ++--- arch/arm/plat-samsung/pm.c | 6 +++--- 11 files changed, 10 insertions(+), 83 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 8755ca8dd48..5c01c607664 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c @@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = { SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), }; -void exynos4_cpu_suspend(void) +void exynos4_cpu_suspend(unsigned long arg) { unsigned long tmp; unsigned long mask = 0xFFFFFFFF; diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index b56a4914985..0984078f1eb 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -32,24 +32,6 @@ .text - /* - * s3c_cpu_save - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - adr r3, BSYM(exynos4_finish_suspend) - b cpu_suspend - -exynos4_finish_suspend: - ldr r0, =pm_cpu_sleep - ldr r0, [ r0 ] - mov pc, r0 - - .ltorg - /* * sleep magic, to allow the bootloader to check for an valid * image to resume to. Must be the first word before the diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index 752b13a7b3d..fecd85489e5 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c @@ -37,7 +37,7 @@ extern void s3c2412_sleep_enter(void); -static void s3c2412_cpu_suspend(void) +static void s3c2412_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 41db2b21e21..268fda74420 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c @@ -24,7 +24,7 @@ extern void s3c2412_sleep_enter(void); -static void s3c2416_cpu_suspend(void) +static void s3c2416_cpu_suspend(unsigned long arg) { flush_cache_all(); diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index bc1c470b7de..7cc1879af72 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -112,7 +112,7 @@ void s3c_pm_save_core(void) * this. */ -static void s3c64xx_cpu_suspend(void) +static void s3c64xx_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index 2fd34333283..34313f9c879 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S @@ -25,24 +25,6 @@ .text - /* s3c_cpu_save - * - * Save enough processor state to allow the restart of the pm.c - * code after resume. - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - adr r3, BSYM(s3c64xx_finish_suspend) - b cpu_suspend - -s3c64xx_finish_suspend: - @@ call final suspend code - ldr r0, =pm_cpu_sleep - ldr pc, [r0] - /* Sleep magic, the word before the resume entry point so that the * bootloader can check for a resumeable image. 
*/ diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 24febae3d4c..309e388a8a8 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c @@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = { SAVE_ITEM(S3C2410_TCNTO(0)), }; -void s5pv210_cpu_suspend(void) +void s5pv210_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index 28dfeff342b..e3452ccd4b0 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S @@ -32,23 +32,6 @@ .text - /* s3c_cpu_save - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - adr r3, BSYM(s5pv210_finish_suspend) - b cpu_suspend - -s5pv210_finish_suspend: - ldr r0, =pm_cpu_sleep - ldr r0, [ r0 ] - mov pc, r0 - - .ltorg - /* sleep magic, to allow the bootloader to check for an valid * image to resume to. Must be the first word before the * s3c_cpu_resume entry. diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index 6ada459498a..c56612569b4 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S @@ -41,25 +41,6 @@ .text - /* s3c_cpu_save - * - * entry: - * r1 = v:p offset - */ - -ENTRY(s3c_cpu_save) - adr r3, BSYM(s3c24xx_finish_suspend) - b cpu_suspend - -s3c24xx_finish_suspend: - @@ jump to final code to send system to sleep - ldr r0, =pm_cpu_sleep - @@ldr pc, [ r0 ] - ldr r0, [ r0 ] - mov pc, r0 - - .ltorg - /* sleep magic, to allow the bootloader to check for an valid * image to resume to. Must be the first word before the * s3c_cpu_resume entry. diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 7fb6f6be8c8..0a5b7faca83 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h @@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow; /* per-cpu sleep functions */ extern void (*pm_cpu_prep)(void); -extern void (*pm_cpu_sleep)(void); +extern void (*pm_cpu_sleep)(unsigned long); /* Flags for PM Control */ @@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */ /* from sleep.S */ -extern int s3c_cpu_save(unsigned long *saveblk, long); extern void s3c_cpu_resume(void); -extern void s3c2410_cpu_suspend(void); +extern void s3c2410_cpu_suspend(unsigned long); /* sleep save info */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 3828191416b..3a6d0768ba0 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -231,7 +231,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start, void (*pm_cpu_prep)(void); -void (*pm_cpu_sleep)(void); +void (*pm_cpu_sleep)(unsigned long); #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) @@ -294,11 +294,11 @@ static int s3c_pm_enter(suspend_state_t state) s3c_pm_arch_stop_clocks(); - /* s3c_cpu_save will also act as our return point from when + /* this will also act as our return point from when * we resume as it saves its own register state and restores it * during the resume. 
*/ - s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, pm_cpu_sleep); /* restore the system state */ -- cgit v1.2.3-18-g5258 From 372c0ac8aca7655b6a8920b10bf9563402ac19d8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 19:34:34 +0100 Subject: ARM: pm: samsung: no need to call flush_cache_all() The core suspend code calls flush_cache_all() immediately prior to calling the suspend finisher function, so remove these needless calls from the finisher functions. Signed-off-by: Russell King --- arch/arm/mach-s3c2412/pm.c | 2 -- arch/arm/mach-s3c2416/pm.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index fecd85489e5..9a1fb898db5 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c @@ -41,8 +41,6 @@ static void s3c2412_cpu_suspend(unsigned long arg) { unsigned long tmp; - flush_cache_all(); - /* set our standby method to sleep */ tmp = __raw_readl(S3C2412_PWRCFG); diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 268fda74420..9e67a2a07a8 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c @@ -26,8 +26,6 @@ extern void s3c2412_sleep_enter(void); static void s3c2416_cpu_suspend(unsigned long arg) { - flush_cache_all(); - /* enable wakeup sources regardless of battery state */ __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); -- cgit v1.2.3-18-g5258 From a9503d2185bbc28e498c435a07f24986c48b5cbe Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jun 2011 16:29:30 +0100 Subject: ARM: pm: pxa: move cpu_suspend into C code We don't need a veneer for cpu_suspend, it can be called directly from C code now. Move it into the PXA CPU suspend functions, along with the accumulator register saving/restoring. 
Signed-off-by: Russell King --- arch/arm/mach-pxa/include/mach/pm.h | 4 +-- arch/arm/mach-pxa/pxa25x.c | 3 +- arch/arm/mach-pxa/pxa27x.c | 11 ++++++- arch/arm/mach-pxa/pxa3xx.c | 13 +++++++-- arch/arm/mach-pxa/sleep.S | 58 ++++--------------------------------- arch/arm/mach-pxa/zeus.c | 3 +- 6 files changed, 33 insertions(+), 59 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h index f15afe01299..a566720527c 100644 --- a/arch/arm/mach-pxa/include/mach/pm.h +++ b/arch/arm/mach-pxa/include/mach/pm.h @@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns { extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; /* sleep.S */ -extern void pxa25x_cpu_suspend(unsigned int, long); -extern void pxa27x_cpu_suspend(unsigned int, long); +extern void pxa25x_finish_suspend(unsigned long); +extern void pxa27x_finish_suspend(unsigned long); extern int pxa_pm_enter(suspend_state_t state); extern int pxa_pm_prepare(void); diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fed363cec9c..fd7725cb5c0 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -244,7 +244,8 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) switch (state) { case PM_SUSPEND_MEM: - pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, PWRMODE_SLEEP, + pxa25x_finish_suspend); break; } } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 2fecbec58d8..824379d4375 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -284,6 +284,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save) void pxa27x_cpu_pm_enter(suspend_state_t state) { extern void pxa_cpu_standby(void); +#ifndef CONFIG_IWMMXT + u64 acc0; + + asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); +#endif /* ensure voltage-change sequencer not initiated, which hangs */ PCFR &= ~PCFR_FVC; @@ -299,7 +304,11 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) pxa_cpu_standby(); break; case PM_SUSPEND_MEM: - pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, pwrmode, + pxa27x_finish_suspend); +#ifndef CONFIG_IWMMXT + asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); +#endif break; } } diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8521d7d6f1d..220fd8c15da 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -141,8 +141,13 @@ static void pxa3xx_cpu_pm_suspend(void) { volatile unsigned long *p = (volatile void *)0xc0000000; unsigned long saved_data = *p; +#ifndef CONFIG_IWMMXT + u64 acc0; - extern void pxa3xx_cpu_suspend(long); + asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); +#endif + + extern void pxa3xx_finish_suspend(unsigned long); /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); @@ -162,11 +167,15 @@ static void pxa3xx_cpu_pm_suspend(void) /* overwrite with the resume address */ *p = virt_to_phys(cpu_resume); - pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, pxa3xx_finish_suspend); *p = saved_data; AD3ER = 0; + +#ifndef CONFIG_IWMMXT + asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); +#endif } static void pxa3xx_cpu_pm_enter(suspend_state_t state) diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 3a67887e6db..1e544be9905 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S @@ -24,22 +24,9 @@ #ifdef CONFIG_PXA3xx /* - * 
pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4) - * - * r0 = v:p offset + * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4) */ -ENTRY(pxa3xx_cpu_suspend) - -#ifndef CONFIG_IWMMXT - mra r2, r3, acc0 -#endif - stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r1, r0 - adr r3, BSYM(pxa3xx_finish_suspend) - bl cpu_suspend - b pxa_cpu_resume - -pxa3xx_finish_suspend: +ENTRY(pxa3xx_finish_suspend) mov r0, #0x06 @ S2D3C4 mode mcr p14, 0, r0, c7, c0, 0 @ enter sleep @@ -48,25 +35,13 @@ pxa3xx_finish_suspend: #ifdef CONFIG_PXA27x /* - * pxa27x_cpu_suspend() + * pxa27x_finish_suspend() * * Forces CPU into sleep state. * * r0 = value for PWRMODE M field for desired sleep state - * r1 = v:p offset */ -ENTRY(pxa27x_cpu_suspend) - -#ifndef CONFIG_IWMMXT - mra r2, r3, acc0 -#endif - stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r2, r0 @ save sleep mode - adr r3, BSYM(pxa27x_finish_suspend) - bl cpu_suspend - b pxa_cpu_resume - -pxa27x_finish_suspend: +ENTRY(pxa27x_finish_suspend) @ Put the processor to sleep @ (also workaround for sighting 28071) @@ -103,22 +78,14 @@ pxa27x_finish_suspend: #ifdef CONFIG_PXA25x /* - * pxa25x_cpu_suspend() + * pxa25x_finish_suspend() * * Forces CPU into sleep state. * * r0 = value for PWRMODE M field for desired sleep state - * r1 = v:p offset */ -ENTRY(pxa25x_cpu_suspend) - stmfd sp!, {r2 - r12, lr} @ save registers on stack - mov r2, r0 @ save sleep mode - adr r3, BSYM(pxa25x_finish_suspend) - bl cpu_suspend - b pxa_cpu_resume - -pxa25x_finish_suspend: +ENTRY(pxa25x_finish_suspend) @ prepare value for sleep mode mov r1, r0 @ sleep mode @@ -202,16 +169,3 @@ pxa_cpu_do_suspend: mcr p14, 0, r1, c7, c0, 0 @ PWRMODE 20: b 20b @ loop waiting for sleep - -/* - * pxa_cpu_resume() - * - * entry point from bootloader into kernel during resume - */ - .align 5 -pxa_cpu_resume: - ldmfd sp!, {r2, r3} -#ifndef CONFIG_IWMMXT - mar acc0, r2, r3 -#endif - ldmfd sp!, {r4 - r12, pc} @ return to caller diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 00363c7ac18..28eb410ca77 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -676,7 +676,8 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = { static void zeus_power_off(void) { local_irq_disable(); - pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, PWRMODE_DEEPSLEEP, + pxa27x_finish_suspend); } #else #define zeus_power_off NULL -- cgit v1.2.3-18-g5258 From 857c1b81f8dd2e2a97d859d7e53dd955e2ab55af Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 12:44:32 +0100 Subject: ARM: pm: omap34xx: no need to save all registers in sleep34xx.S The ABI allows called functions to corrupt r0-r3 and ip (r12). So its pointless saving these registers in the suspend code - the calling function will expect them to be corrupted and so won't rely on their contents after resume. 
Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mach-omap2/sleep34xx.S | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 63f10669571..9551c7dc71e 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -133,7 +133,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore) /* Function to call rom code to save secure ram context */ .align 3 ENTRY(save_secure_ram_context) - stmfd sp!, {r1-r12, lr} @ save registers on stack + stmfd sp!, {r4 - r11, lr} @ save registers on stack adr r3, api_params @ r3 points to parameters str r0, [r3,#0x4] @ r0 has sdram address ldr r12, high_mask @@ -152,7 +152,7 @@ ENTRY(save_secure_ram_context) nop nop nop - ldmfd sp!, {r1-r12, pc} + ldmfd sp!, {r4 - r11, pc} .align sram_phy_addr_mask: .word SRAM_BASE_P @@ -187,7 +187,7 @@ ENTRY(save_secure_ram_context_sz) */ .align 3 ENTRY(omap34xx_cpu_suspend) - stmfd sp!, {r0-r12, lr} @ save registers on stack + stmfd sp!, {r4 - r11, lr} @ save registers on stack /* * r0 contains CPU context save/restore pointer in sdram @@ -329,7 +329,7 @@ omap3_do_wfi: * == Exit point from non-OFF modes == * =================================== */ - ldmfd sp!, {r0-r12, pc} @ restore regs and return + ldmfd sp!, {r4 - r11, pc} @ restore regs and return /* @@ -572,7 +572,7 @@ usettbr0: * ============================== */ restoremmu_on: - ldmfd sp!, {r0-r12, pc} @ restore regs and return + ldmfd sp!, {r4 - r11, pc} @ restore regs and return /* -- cgit v1.2.3-18-g5258 From 2637ce30e145557bf89ebcf35b2d78e729e16e5a Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 12:54:41 +0100 Subject: ARM: pm: omap34xx: remove misleading comment and use of r9 The code alludes to r9 being used to indicate what was lost over the suspend/resume transition. However, although r9 is set, it is never actually used. Also, the comments before the code (which refer to the value of r9) and the comments against the assignment of r9 contradict each other, so just remove them to avoid confusion. Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mach-omap2/sleep34xx.S | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 9551c7dc71e..12e9da2f026 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -381,18 +381,13 @@ restore_3630: restore: /* - * Check what was the reason for mpu reset and store the reason in r9: - * 0 - No context lost - * 1 - Only L1 and logic lost - * 2 - Only L2 lost - In this case, we wont be here - * 3 - Both L1 and L2 lost + * Read the pwstctrl register to check the reason for mpu reset. + * This tells us what was lost. */ ldr r1, pm_pwstctrl_mpu ldr r2, [r1] and r2, r2, #0x3 cmp r2, #0x0 @ Check if target power state was OFF or RET - moveq r9, #0x3 @ MPU OFF => L1 and L2 lost - movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation bne logic_l1_restore ldr r0, l2dis_3630 -- cgit v1.2.3-18-g5258 From 076f2cc449188b7d3d4866730afa3ac7be3e6640 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 15:42:54 +0100 Subject: ARM: pm: omap34xx: convert to generic suspend/resume support Convert omap34xx to use the generic CPU suspend/resume support, rather than implementing its own version. Tested on 3430 LDP. 
Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/mach-omap2/pm34xx.c | 47 +++---------- arch/arm/mach-omap2/sleep34xx.S | 143 +--------------------------------------- 2 files changed, 13 insertions(+), 177 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index c155c9d1c82..ae4017750bb 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -40,8 +40,6 @@ #include #include -#include - #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" #include "prm-regbits-34xx.h" @@ -64,11 +62,6 @@ static inline bool is_suspending(void) } #endif -/* Scratchpad offsets */ -#define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 -#define OMAP343X_TABLE_VALUE_OFFSET 0xc0 -#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8 - /* pm34xx errata defined in pm.h */ u16 pm34xx_errata; @@ -312,28 +305,9 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) return IRQ_HANDLED; } -/* Function to restore the table entry that was modified for enabling MMU */ -static void restore_table_entry(void) +static void omap34xx_do_sram_idle(unsigned long save_state) { - void __iomem *scratchpad_address; - u32 previous_value, control_reg_value; - u32 *address; - - scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); - - /* Get address of entry that was modified */ - address = (u32 *)__raw_readl(scratchpad_address + - OMAP343X_TABLE_ADDRESS_OFFSET); - /* Get the previous value which needs to be restored */ - previous_value = __raw_readl(scratchpad_address + - OMAP343X_TABLE_VALUE_OFFSET); - address = __va(address); - *address = previous_value; - flush_tlb_all(); - control_reg_value = __raw_readl(scratchpad_address - + OMAP343X_CONTROL_REG_VALUE_OFFSET); - /* This will enable caches and prediction */ - set_cr(control_reg_value); + _omap_sram_idle(omap3_arm_context, save_state); } void omap_sram_idle(void) @@ -432,12 +406,15 @@ void omap_sram_idle(void) sdrc_pwr = sdrc_read_reg(SDRC_POWER); /* - * omap3_arm_context is the location where ARM registers - * get saved. The restore path then reads from this - * location and restores them back. + * omap3_arm_context is the location where some ARM context + * get saved. The rest is placed on the stack, and restored + * from there before resuming. 
*/ - _omap_sram_idle(omap3_arm_context, save_state); - cpu_init(); + if (save_state == 1 || save_state == 3) + cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, save_state, + omap34xx_do_sram_idle); + else + omap34xx_do_sram_idle(save_state); /* Restore normal SDRC POWER settings */ if (omap_rev() >= OMAP3430_REV_ES3_0 && @@ -445,10 +422,6 @@ void omap_sram_idle(void) core_next_state == PWRDM_POWER_OFF) sdrc_write_reg(sdrc_pwr, SDRC_POWER); - /* Restore table entry modified during MMU restoration */ - if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF) - restore_table_entry(); - /* CORE */ if (core_next_state < PWRDM_POWER_ON) { core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 12e9da2f026..9a1349ea460 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -211,37 +211,6 @@ save_context_wfi: mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register stmia r8!, {r4-r5} @ Push parameters for restore call - /* Check what that target sleep state is from r1 */ - cmp r1, #0x2 @ Only L2 lost, no need to save context - beq clean_caches - -l1_logic_lost: - mov r4, sp @ Store sp - mrs r5, spsr @ Store spsr - mov r6, lr @ Store lr - stmia r8!, {r4-r6} - - mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register - mrc p15, 0, r5, c2, c0, 0 @ TTBR0 - mrc p15, 0, r6, c2, c0, 1 @ TTBR1 - mrc p15, 0, r7, c2, c0, 2 @ TTBCR - stmia r8!, {r4-r7} - - mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register - mrc p15, 0, r5, c10, c2, 0 @ PRRR - mrc p15, 0, r6, c10, c2, 1 @ NMRR - stmia r8!,{r4-r6} - - mrc p15, 0, r4, c13, c0, 1 @ Context ID - mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address - mrs r7, cpsr @ Store current cpsr - stmia r8!, {r4-r7} - - mrc p15, 0, r4, c1, c0, 0 @ save control register - stmia r8!, {r4} - -clean_caches: /* * jump out to kernel flush routine * - reuse that code is better @@ -466,109 +435,11 @@ logic_l1_restore: orr r1, r1, #2 @ re-enable L2 cache mcr p15, 0, r1, c1, c0, 1 skipl2reen: - mov r1, #0 - /* - * Invalidate all instruction caches to PoU - * and flush branch target cache - */ - mcr p15, 0, r1, c7, c5, 0 - ldr r4, scratchpad_base - ldr r3, [r4,#0xBC] - adds r3, r3, #16 - - ldmia r3!, {r4-r6} - mov sp, r4 @ Restore sp - msr spsr_cxsf, r5 @ Restore spsr - mov lr, r6 @ Restore lr - - ldmia r3!, {r4-r7} - mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register - mcr p15, 0, r5, c2, c0, 0 @ TTBR0 - mcr p15, 0, r6, c2, c0, 1 @ TTBR1 - mcr p15, 0, r7, c2, c0, 2 @ TTBCR - - ldmia r3!,{r4-r6} - mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register - mcr p15, 0, r5, c10, c2, 0 @ PRRR - mcr p15, 0, r6, c10, c2, 1 @ NMRR - - - ldmia r3!,{r4-r7} - mcr p15, 0, r4, c13, c0, 1 @ Context ID - mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address - msr cpsr, r7 @ store cpsr - - /* Enabling MMU here */ - mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl - /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ - and r7, #0x7 - cmp r7, #0x0 - beq usettbr0 -ttbr_error: - /* - * More work needs to be done to support N[0:2] value other than 0 - * So looping here so that the error can be detected - */ - b ttbr_error -usettbr0: - mrc p15, 0, r2, c2, c0, 0 - ldr r5, ttbrbit_mask - and r2, r5 - mov r4, pc - ldr r5, table_index_mask - and r4, r5 @ r4 = 31 to 20 bits of pc - /* Extract the value to be written to table 
entry */ - ldr r1, table_entry - /* r1 has the value to be written to table entry*/ - add r1, r1, r4 - /* Getting the address of table entry to modify */ - lsr r4, #18 - /* r2 has the location which needs to be modified */ - add r2, r4 - /* Storing previous entry of location being modified */ - ldr r5, scratchpad_base - ldr r4, [r2] - str r4, [r5, #0xC0] - /* Modify the table entry */ - str r1, [r2] - /* - * Storing address of entry being modified - * - will be restored after enabling MMU - */ - ldr r5, scratchpad_base - str r2, [r5, #0xC4] - - mov r0, #0 - mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer - mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array - mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB - mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB - /* - * Restore control register. This enables the MMU. - * The caches and prediction are not enabled here, they - * will be enabled after restoring the MMU table entry. - */ - ldmia r3!, {r4} - /* Store previous value of control register in scratchpad */ - str r4, [r5, #0xC8] - ldr r2, cache_pred_disable_mask - and r4, r2 - mcr p15, 0, r4, c1, c0, 0 - dsb - isb - ldr r0, =restoremmu_on - bx r0 - -/* - * ============================== - * == Exit point from OFF mode == - * ============================== - */ -restoremmu_on: - ldmfd sp!, {r4 - r11, pc} @ restore regs and return + /* Now branch to the common CPU resume function */ + b cpu_resume + .ltorg /* * Internal functions @@ -719,14 +590,6 @@ sram_base: .word SRAM_BASE_P + 0x8000 sdrc_power: .word SDRC_POWER_V -ttbrbit_mask: - .word 0xFFFFC000 -table_index_mask: - .word 0xFFF00000 -table_entry: - .word 0x00000C02 -cache_pred_disable_mask: - .word 0xFFFFE7FB control_stat: .word CONTROL_STAT control_mem_rta: -- cgit v1.2.3-18-g5258 From 14c79bbed7e06135bcbccb2b92c19df7115b4502 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Thu, 23 Jun 2011 17:16:14 -0700 Subject: ARM: pm: omap34xx: remove get_*_restore_pointer functions, directly use entry points Upon return from off-mode, the ROM code jumps to a restore function saved in the scratchpad. Based on SoC revision or errata, this restore entry point is different. Current code uses some helper functions in sleep34xx.S (get_*_restore_pointer) to get the restore function entry point. When returning from off-mode, this code is executed from SDRAM, so there's no reason to use these helper functions when using the SDRAM entry points directly would work just fine. This patch uses ENTRY/ENDPROC to create "real" entry points for these functions, and uses those values directly when writing the scratchpad. 
Tested all three entry points - restore_es3: 3430/n900 - restore_3630: 3630/Zoom3 - restore: 3530/Overo Suggested-by: Russell King Acked-by: Jean Pihet Acked-by: Santosh Shilimkar Signed-off-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/mach-omap2/control.c | 7 +++--- arch/arm/mach-omap2/control.h | 6 ++--- arch/arm/mach-omap2/sleep34xx.S | 55 +++++++---------------------------------- 3 files changed, 16 insertions(+), 52 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index da53ba3917c..aab884fecc5 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c @@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void) scratchpad_contents.boot_config_ptr = 0x0; if (cpu_is_omap3630()) scratchpad_contents.public_restore_ptr = - virt_to_phys(get_omap3630_restore_pointer()); + virt_to_phys(omap3_restore_3630); else if (omap_rev() != OMAP3430_REV_ES3_0 && omap_rev() != OMAP3430_REV_ES3_1) scratchpad_contents.public_restore_ptr = - virt_to_phys(get_restore_pointer()); + virt_to_phys(omap3_restore); else scratchpad_contents.public_restore_ptr = - virt_to_phys(get_es3_restore_pointer()); + virt_to_phys(omap3_restore_es3); + if (omap_type() == OMAP2_DEVICE_TYPE_GP) scratchpad_contents.secure_ram_restore_ptr = 0x0; else diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h index a016c8b59e0..d4ef75d5a38 100644 --- a/arch/arm/mach-omap2/control.h +++ b/arch/arm/mach-omap2/control.h @@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset); extern void omap3_save_scratchpad_contents(void); extern void omap3_clear_scratchpad_contents(void); -extern u32 *get_restore_pointer(void); -extern u32 *get_es3_restore_pointer(void); -extern u32 *get_omap3630_restore_pointer(void); +extern void omap3_restore(void); +extern void omap3_restore_es3(void); +extern void omap3_restore_3630(void); extern u32 omap3_arm_context[128]; extern void omap3_control_save_context(void); extern void omap3_control_restore_context(void); diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 9a1349ea460..d18f52e46c7 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -74,46 +74,6 @@ * API functions */ -/* - * The "get_*restore_pointer" functions are used to provide a - * physical restore address where the ROM code jumps while waking - * up from MPU OFF/OSWR state. - * The restore pointer is stored into the scratchpad. - */ - - .text -/* Function call to get the restore pointer for resume from OFF */ -ENTRY(get_restore_pointer) - stmfd sp!, {lr} @ save registers on stack - adr r0, restore - ldmfd sp!, {pc} @ restore regs and return -ENDPROC(get_restore_pointer) - .align -ENTRY(get_restore_pointer_sz) - .word . - get_restore_pointer - - .text -/* Function call to get the restore pointer for 3630 resume from OFF */ -ENTRY(get_omap3630_restore_pointer) - stmfd sp!, {lr} @ save registers on stack - adr r0, restore_3630 - ldmfd sp!, {pc} @ restore regs and return -ENDPROC(get_omap3630_restore_pointer) - .align -ENTRY(get_omap3630_restore_pointer_sz) - .word . - get_omap3630_restore_pointer - - .text -/* Function call to get the restore pointer for ES3 to resume from OFF */ -ENTRY(get_es3_restore_pointer) - stmfd sp!, {lr} @ save registers on stack - adr r0, restore_es3 - ldmfd sp!, {pc} @ restore regs and return -ENDPROC(get_es3_restore_pointer) - .align -ENTRY(get_es3_restore_pointer_sz) - .word . 
- get_es3_restore_pointer - .text /* * L2 cache needs to be toggled for stable OFF mode functionality on 3630. @@ -316,12 +276,12 @@ omap3_do_wfi: * restore_3630: applies to 36xx * restore: common code for 3xxx */ -restore_es3: +ENTRY(omap3_restore_es3) ldr r5, pm_prepwstst_core_p ldr r4, [r5] and r4, r4, #0x3 cmp r4, #0x0 @ Check if previous power state of CORE is OFF - bne restore + bne omap3_restore adr r0, es3_sdrc_fix ldr r1, sram_base ldr r2, es3_sdrc_fix_sz @@ -333,22 +293,24 @@ copy_to_sram: bne copy_to_sram ldr r1, sram_base blx r1 - b restore + b omap3_restore +ENDPROC(omap3_restore_es3) -restore_3630: +ENTRY(omap3_restore_3630) ldr r1, pm_prepwstst_core_p ldr r2, [r1] and r2, r2, #0x3 cmp r2, #0x0 @ Check if previous power state of CORE is OFF - bne restore + bne omap3_restore /* Disable RTA before giving control */ ldr r1, control_mem_rta mov r2, #OMAP36XX_RTA_DISABLE str r2, [r1] +ENDPROC(omap3_restore_3630) /* Fall through to common code for the remaining logic */ -restore: +ENTRY(omap3_restore) /* * Read the pwstctrl register to check the reason for mpu reset. * This tells us what was lost. @@ -438,6 +400,7 @@ skipl2reen: /* Now branch to the common CPU resume function */ b cpu_resume +ENDPROC(omap3_restore) .ltorg -- cgit v1.2.3-18-g5258 From 2c74a0cefa463a7a483b07ba4d2ea8e4ec7b996c Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 22 Jun 2011 17:41:48 +0100 Subject: ARM: pm: hide 1st and 2nd arguments to cpu_suspend from platform code The first and second arguments shouldn't concern platform code, so hide them from each platforms caller. Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/include/asm/suspend.h | 19 +++++++++++++++++++ arch/arm/include/asm/system.h | 2 -- arch/arm/kernel/sleep.S | 4 ++-- arch/arm/mach-omap2/pm34xx.c | 5 +++-- arch/arm/mach-pxa/palmz72.c | 1 + arch/arm/mach-pxa/pxa25x.c | 4 ++-- arch/arm/mach-pxa/pxa27x.c | 4 ++-- arch/arm/mach-pxa/pxa3xx.c | 3 ++- arch/arm/mach-pxa/zeus.c | 4 ++-- arch/arm/mach-sa1100/pm.c | 3 ++- arch/arm/plat-samsung/pm.c | 3 ++- 11 files changed, 37 insertions(+), 15 deletions(-) create mode 100644 arch/arm/include/asm/suspend.h (limited to 'arch/arm') diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 00000000000..8d5b4461c20 --- /dev/null +++ b/arch/arm/include/asm/suspend.h @@ -0,0 +1,19 @@ +#ifndef __ASM_ARM_SUSPEND_H +#define __ASM_ARM_SUSPEND_H + +#include + +extern void cpu_resume(void); + +/* + * Hide the first two arguments to __cpu_suspend - these are an implementation + * detail which platform code shouldn't have to know about. 
+ */ +static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long)) +{ + extern void __cpu_suspend(int, long, unsigned long, + void (*)(unsigned long)); + __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); +} + +#endif diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 50be6055df8..832888d0c20 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -106,8 +106,6 @@ extern void __show_regs(struct pt_regs *); extern int cpu_architecture(void); extern void cpu_init(void); -extern void cpu_suspend(int, long, unsigned long, void (*)(unsigned long)); -extern void cpu_resume(void); void arm_machine_restart(char mode, const char *cmd); extern void (*arm_pm_restart)(char str, const char *cmd); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 53922748d10..c156d0e5f45 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -14,7 +14,7 @@ * r3 = suspend function * Note: does not return until system resumes */ -ENTRY(cpu_suspend) +ENTRY(__cpu_suspend) stmfd sp!, {r4 - r11, lr} #ifdef MULTI_CPU ldr r10, =processor @@ -56,7 +56,7 @@ ENTRY(cpu_suspend) bl __cpuc_flush_kern_all #endif ldmfd sp!, {r0, pc} @ call suspend fn -ENDPROC(cpu_suspend) +ENDPROC(__cpu_suspend) .ltorg /* diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index ae4017750bb..3e9a13e1ac5 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -31,6 +31,8 @@ #include #include +#include + #include #include "clockdomain.h" #include "powerdomain.h" @@ -411,8 +413,7 @@ void omap_sram_idle(void) * from there before resuming. */ if (save_state == 1 || save_state == 3) - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, save_state, - omap34xx_do_sram_idle); + cpu_suspend(save_state, omap34xx_do_sram_idle); else omap34xx_do_sram_idle(save_state); diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 65f24f0b77e..5a5329bc33f 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -33,6 +33,7 @@ #include #include +#include #include #include diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fd7725cb5c0..9c434d21a27 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -244,8 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) switch (state) { case PM_SUSPEND_MEM: - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, PWRMODE_SLEEP, - pxa25x_finish_suspend); + cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); break; } } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 824379d4375..9d2400b5f50 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -304,8 +305,7 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) pxa_cpu_standby(); break; case PM_SUSPEND_MEM: - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, pwrmode, - pxa27x_finish_suspend); + cpu_suspend(pwrmode, pxa27x_finish_suspend); #ifndef CONFIG_IWMMXT asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); #endif diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 220fd8c15da..9fe947b5d5f 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -167,7 +168,7 @@ static void pxa3xx_cpu_pm_suspend(void) /* overwrite with the resume address */ *p = 
virt_to_phys(cpu_resume); - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, pxa3xx_finish_suspend); + cpu_suspend(0, pxa3xx_finish_suspend); *p = saved_data; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 28eb410ca77..9b99cc164de 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -676,8 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = { static void zeus_power_off(void) { local_irq_disable(); - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, PWRMODE_DEEPSLEEP, - pxa27x_finish_suspend); + cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend); } #else #define zeus_power_off NULL diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index 259ed3bcc3f..cf9a1e9fb70 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -75,7 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state) PSPR = virt_to_phys(cpu_resume); /* go zzz */ - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, sa1100_finish_suspend); + cpu_suspend(0, sa1100_finish_suspend); /* * Ensure not to come back here if it wasn't intended diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 3a6d0768ba0..69d6b040a01 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -298,7 +299,7 @@ static int s3c_pm_enter(suspend_state_t state) * we resume as it saves its own register state and restores it * during the resume. */ - cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, 0, pm_cpu_sleep); + cpu_suspend(0, pm_cpu_sleep); /* restore the system state */ -- cgit v1.2.3-18-g5258 From 0853f96f13e0b60f1c319bcd7f6a3a84d0d1e706 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 23 Jun 2011 14:24:09 +0100 Subject: ARM: pm: ensure our temporary page table entry is removed from the TLB Ensure that our temporary page table entry is flushed from the TLB before we resume normal operations. This ensures that userspace won't trip over the stale TLB entry. 
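Combined with the previous patch, the platform-facing helper therefore ends up as the two-argument form sketched below. This is simply the two asm/suspend.h hunks put together for readability, not a separate change, and the example caller is the pxa25x one quoted from earlier in the series.

	/* arch/arm/include/asm/suspend.h, end state after this series */
	static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long))
	{
		extern void __cpu_suspend(int, long, unsigned long,
					  void (*)(unsigned long));
		__cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
		flush_tlb_all();	/* flush the temporary page table entry */
	}

	/* a platform then only supplies its own argument and finisher: */
	cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend);
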
Tested-by: Kevin Hilman Acked-by: Jean Pihet Signed-off-by: Russell King --- arch/arm/include/asm/suspend.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index 8d5b4461c20..f8db9d096bc 100644 --- a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h @@ -2,6 +2,7 @@ #define __ASM_ARM_SUSPEND_H #include +#include extern void cpu_resume(void); @@ -14,6 +15,7 @@ static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long)) extern void __cpu_suspend(int, long, unsigned long, void (*)(unsigned long)); __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); + flush_tlb_all(); } #endif -- cgit v1.2.3-18-g5258 From 5c1f9668692061b97125e343721c7514ca05a8bb Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Tue, 21 Jun 2011 11:24:33 +0800 Subject: at91: fix at91_set_serial_console: use platform device id Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD --- arch/arm/mach-at91/at91cap9_devices.c | 2 +- arch/arm/mach-at91/at91rm9200_devices.c | 2 +- arch/arm/mach-at91/at91sam9260_devices.c | 2 +- arch/arm/mach-at91/at91sam9261_devices.c | 2 +- arch/arm/mach-at91/at91sam9263_devices.c | 2 +- arch/arm/mach-at91/at91sam9g45_devices.c | 2 +- arch/arm/mach-at91/at91sam9rl_devices.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c index cd850ed6f33..dba0d8d8a4b 100644 --- a/arch/arm/mach-at91/at91cap9_devices.c +++ b/arch/arm/mach-at91/at91cap9_devices.c @@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91cap9_set_console_clock(portnr); + at91cap9_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index a0ba475be04..7227755ffec 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91rm9200_set_console_clock(portnr); + at91rm9200_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 1fdeb9058a7..39f81f47b4b 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9260_set_console_clock(portnr); + at91sam9260_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 3eb4538fcee..5004bf0a05f 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9261_set_console_clock(portnr); + at91sam9261_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index ffe081b77ed..a050f41fc86 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ 
b/arch/arm/mach-at91/at91sam9263_devices.c @@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9263_set_console_clock(portnr); + at91sam9263_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 05674865bc2..600bffb01ed 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9g45_set_console_clock(portnr); + at91sam9g45_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index c296045f2b6..aacb19dc922 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9rl_set_console_clock(portnr); + at91sam9rl_set_console_clock(at91_uarts[portnr]->id); } } -- cgit v1.2.3-18-g5258 From 9d87159e571d73ccf87adf1ee3691b861253d900 Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Tue, 21 Jun 2011 14:24:33 +0800 Subject: at91: fix udc, ehci and mmc clock device name for cap9/9g45/9rl Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Nicolas Ferre --- arch/arm/mach-at91/at91cap9.c | 4 ++-- arch/arm/mach-at91/at91sam9g45.c | 10 +++++----- arch/arm/mach-at91/at91sam9rl.c | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c index 17fae4a42ab..5e6f8a7958a 100644 --- a/arch/arm/mach-at91/at91cap9.c +++ b/arch/arm/mach-at91/at91cap9.c @@ -223,8 +223,8 @@ static struct clk *periph_clocks[] __initdata = { }; static struct clk_lookup periph_clocks_lookups[] = { - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 2bb6ff9af1c..11e214121b2 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = { static struct clk_lookup periph_clocks_lookups[] = { /* One additional fake clock for ohci */ CLKDEV_CON_ID("ohci_clk", &uhphs_clk), - CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk), - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), - CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), - CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), + CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), + CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk), + CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", 
&spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk), diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c index 1a40f16b66c..29dff18ed13 100644 --- a/arch/arm/mach-at91/at91sam9rl.c +++ b/arch/arm/mach-at91/at91sam9rl.c @@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = { }; static struct clk_lookup periph_clocks_lookups[] = { - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), -- cgit v1.2.3-18-g5258 From c5efefac659870744f6e203e28abd095ac0f590b Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Thu, 2 Jun 2011 01:36:09 +0200 Subject: at91: Use "pclk" as con_id on at91cap9 and at91rm9200 Hello, I am not 100% sure this is the right thing to do, but it makes the atmel-ssc driver happy on my at91rm9200 board. This unifies the con_id across all at91 machines. The atmel-ssc driver expects the con_id to be "pclk" or it will fail probing. Signed-off-by: Joachim Eastwood Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD --- arch/arm/mach-at91/at91cap9.c | 4 ++-- arch/arm/mach-at91/at91rm9200.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c index 5e6f8a7958a..f1013d08bb5 100644 --- a/arch/arm/mach-at91/at91cap9.c +++ b/arch/arm/mach-at91/at91cap9.c @@ -230,8 +230,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), }; static struct clk_lookup usart_clocks_lookups[] = { diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index b228ce9e21a..83a1a3fee55 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk), }; static struct clk_lookup usart_clocks_lookups[] = { -- cgit v1.2.3-18-g5258 From b6fcd313c9db4f4c4feaf9d5d48b293d5fa27061 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 10:32:05 +0100 Subject: ARM: entry: remove unused irq_prio_table macro Platforms provide an empty irq_prio_table macro, and as nothing uses this macro, it can simply be removed. 
Signed-off-by: Russell King --- arch/arm/mach-bcmring/include/mach/entry-macro.S | 4 ---- arch/arm/mach-davinci/include/mach/entry-macro.S | 3 --- arch/arm/mach-h720x/include/mach/entry-macro.S | 3 --- arch/arm/mach-lpc32xx/include/mach/entry-macro.S | 4 ---- arch/arm/mach-omap2/include/mach/entry-macro.S | 3 --- arch/arm/mach-pnx4008/include/mach/entry-macro.S | 5 ----- arch/arm/plat-mxc/include/mach/entry-macro.S | 4 ---- 7 files changed, 26 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S index 7d393ca010a..94c950d783b 100644 --- a/arch/arm/mach-bcmring/include/mach/entry-macro.S +++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S @@ -80,7 +80,3 @@ .macro arch_ret_to_user, tmp1, tmp2 .endm - - .macro irq_prio_table - .endm - diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S index fbdebc7cb40..e14c0dc0e12 100644 --- a/arch/arm/mach-davinci/include/mach/entry-macro.S +++ b/arch/arm/mach-davinci/include/mach/entry-macro.S @@ -46,6 +46,3 @@ #endif 1002: .endm - - .macro irq_prio_table - .endm diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S index 6d3b917c4a1..c3948e5ba4a 100644 --- a/arch/arm/mach-h720x/include/mach/entry-macro.S +++ b/arch/arm/mach-h720x/include/mach/entry-macro.S @@ -57,9 +57,6 @@ tst \irqstat, #1 @ bit 0 should be set .endm - .macro irq_prio_table - .endm - #else #error hynix processor selection missmatch #endif diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S index 870227c9660..b725f6c9397 100644 --- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S +++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S @@ -41,7 +41,3 @@ rsb \irqnr, \irqnr, #31 teq \irqstat, #0 .endm - - .macro irq_prio_table - .endm - diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S index a48690b9099..ceb8b7e593d 100644 --- a/arch/arm/mach-omap2/include/mach/entry-macro.S +++ b/arch/arm/mach-omap2/include/mach/entry-macro.S @@ -165,6 +165,3 @@ #endif #endif /* MULTI_OMAP2 */ - - .macro irq_prio_table - .endm diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S index 8003037578e..db7eeebf30d 100644 --- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S +++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S @@ -120,8 +120,3 @@ 1003: .endm - - .macro irq_prio_table - .endm - - diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S index 2e49e71b1b9..066d464d322 100644 --- a/arch/arm/plat-mxc/include/mach/entry-macro.S +++ b/arch/arm/plat-mxc/include/mach/entry-macro.S @@ -78,7 +78,3 @@ movs \irqnr, \irqnr #endif .endm - - @ irq priority table (not used) - .macro irq_prio_table - .endm -- cgit v1.2.3-18-g5258 From 2342aa282b59f028cf0850b483b34587db2c22b0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 10:04:15 +0100 Subject: ARM: entry: shark: don't directly reference registers in macros Directly referencing registers in macros makes assembly code harder to change, because the macros have side effects which are non-obvious. Use the provided 'base' register rather than directly referencing r4. 
Signed-off-by: Russell King --- arch/arm/mach-shark/include/mach/entry-macro.S | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S index e2853c0a333..0bb6cc626eb 100644 --- a/arch/arm/mach-shark/include/mach/entry-macro.S +++ b/arch/arm/mach-shark/include/mach/entry-macro.S @@ -11,17 +11,17 @@ .endm .macro get_irqnr_preamble, base, tmp + mov \base, #0xe0000000 .endm .macro arch_ret_to_user, tmp1, tmp2 .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mov r4, #0xe0000000 mov \irqstat, #0x0C - strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */ - ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7 + strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */ + ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7 and \irqstat, \irqnr, #0x80 teq \irqstat, #0 beq 43f @@ -29,8 +29,8 @@ teq \irqnr, #2 bne 44f 43: mov \irqstat, #0x0C - strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ - ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8 + strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ + ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8 and \irqstat, \irqnr, #0x80 teq \irqstat, #0 beq 44f -- cgit v1.2.3-18-g5258 From 090ab3ff8ebb842c0f159d34d57d6e51bd94ace1 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 26 Apr 2011 06:29:53 +0100 Subject: ARM: 6886/1: mmc, Add zboot from eSD support for SuperH Mobile ARM This allows a ROM-able zImage to be written to eSD and for SuperH Mobile ARM to boot directly from the SDHI hardware block. This is achieved by the MaskROM loading the first portion of the image into MERAM and then jumping to it. This portion contains loader code which copies the entire image to SDRAM and jumps to it. From there the zImage boot code proceeds as normal, uncompressing the image into its final location and then jumping to it. Cc: Paul Mundt Acked-by: Magnus Damm Acked-by: Paul Mundt Signed-off-by: Simon Horman Signed-off-by: Russell King --- arch/arm/Kconfig | 33 +- arch/arm/boot/compressed/Makefile | 10 +- arch/arm/boot/compressed/head-shmobile.S | 12 +- arch/arm/boot/compressed/mmcif-sh7372.c | 2 +- arch/arm/boot/compressed/sdhi-sh7372.c | 95 +++++ arch/arm/boot/compressed/sdhi-shmobile.c | 449 ++++++++++++++++++++++ arch/arm/boot/compressed/sdhi-shmobile.h | 11 + arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h | 21 + arch/arm/mach-shmobile/include/mach/sdhi.h | 16 + 9 files changed, 632 insertions(+), 17 deletions(-) create mode 100644 arch/arm/boot/compressed/sdhi-sh7372.c create mode 100644 arch/arm/boot/compressed/sdhi-shmobile.c create mode 100644 arch/arm/boot/compressed/sdhi-shmobile.h create mode 100644 arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h create mode 100644 arch/arm/mach-shmobile/include/mach/sdhi.h (limited to 'arch/arm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cc0dcbf1f6b..cd8f520dd03 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1718,17 +1718,34 @@ config ZBOOT_ROM Say Y here if you intend to execute your compressed kernel image (zImage) directly from ROM or flash. If unsure, say N. +choice + prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)" + depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL + default ZBOOT_ROM_NONE + help + Include experimental SD/MMC loading code in the ROM-able zImage. 
+ With this enabled it is possible to write the the ROM-able zImage + kernel image to an MMC or SD card and boot the kernel straight + from the reset vector. At reset the processor Mask ROM will load + the first part of the the ROM-able zImage which in turn loads the + rest the kernel image to RAM. + +config ZBOOT_ROM_NONE + bool "No SD/MMC loader in zImage (EXPERIMENTAL)" + help + Do not load image from SD or MMC + config ZBOOT_ROM_MMCIF bool "Include MMCIF loader in zImage (EXPERIMENTAL)" - depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL help - Say Y here to include experimental MMCIF loading code in the - ROM-able zImage. With this enabled it is possible to write the - the ROM-able zImage kernel image to an MMC card and boot the - kernel straight from the reset vector. At reset the processor - Mask ROM will load the first part of the the ROM-able zImage - which in turn loads the rest the kernel image to RAM using the - MMCIF hardware block. + Load image from MMCIF hardware block. + +config ZBOOT_ROM_SH_MOBILE_SDHI + bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)" + help + Load image from SDHI hardware block + +endchoice config CMDLINE string "Default kernel command string" diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 23aad072230..0c74a6fab95 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -6,13 +6,19 @@ OBJS = -# Ensure that mmcif loader code appears early in the image +# Ensure that MMCIF loader code appears early in the image # to minimise that number of bocks that have to be read in # order to load it. ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) -ifeq ($(CONFIG_ARCH_SH7372),y) OBJS += mmcif-sh7372.o endif + +# Ensure that SDHI loader code appears early in the image +# to minimise that number of bocks that have to be read in +# order to load it. 
+ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y) +OBJS += sdhi-shmobile.o +OBJS += sdhi-sh7372.o endif AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S index c943d2e7da9..fe3719b516f 100644 --- a/arch/arm/boot/compressed/head-shmobile.S +++ b/arch/arm/boot/compressed/head-shmobile.S @@ -25,14 +25,14 @@ /* load board-specific initialization code */ #include -#ifdef CONFIG_ZBOOT_ROM_MMCIF - /* Load image from MMC */ - adr sp, __tmp_stack + 128 +#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI) + /* Load image from MMC/SD */ + adr sp, __tmp_stack + 256 ldr r0, __image_start ldr r1, __image_end subs r1, r1, r0 ldr r0, __load_base - bl mmcif_loader + bl mmc_loader /* Jump to loaded code */ ldr r0, __loaded @@ -51,9 +51,9 @@ __loaded: .long __continue .align __tmp_stack: - .space 128 + .space 256 __continue: -#endif /* CONFIG_ZBOOT_ROM_MMCIF */ +#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */ b 1f __atags:@ tag #1 diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c index 7453c8337b8..b6f61d9a5a1 100644 --- a/arch/arm/boot/compressed/mmcif-sh7372.c +++ b/arch/arm/boot/compressed/mmcif-sh7372.c @@ -40,7 +40,7 @@ * to an MMC card * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 */ -asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len) +asmlinkage void mmc_loader(unsigned char *buf, unsigned long len) { mmc_init_progress(); mmc_update_progress(MMC_PROGRESS_ENTER); diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c new file mode 100644 index 00000000000..d403a8b24d7 --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-sh7372.c @@ -0,0 +1,95 @@ +/* + * SuperH Mobile SDHI + * + * Copyright (C) 2010 Magnus Damm + * Copyright (C) 2010 Kuninori Morimoto + * Copyright (C) 2010 Simon Horman + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Parts inspired by u-boot + */ + +#include +#include +#include +#include + +#include "sdhi-shmobile.h" + +#define PORT179CR 0xe60520b3 +#define PORT180CR 0xe60520b4 +#define PORT181CR 0xe60520b5 +#define PORT182CR 0xe60520b6 +#define PORT183CR 0xe60520b7 +#define PORT184CR 0xe60520b8 + +#define SMSTPCR3 0xe615013c + +#define CR_INPUT_ENABLE 0x10 +#define CR_FUNCTION1 0x01 + +#define SDHI1_BASE (void __iomem *)0xe6860000 +#define SDHI_BASE SDHI1_BASE + +/* SuperH Mobile SDHI loader + * + * loads the zImage from an SD card starting from block 0 + * on physical partition 1 + * + * The image must be start with a vrl4 header and + * the zImage must start at offset 512 of the image. 
That is, + * at block 1 (=byte 512) of physical partition 1 + * + * Use the following line to write the vrl4 formated zImage + * to an SD card + * # dd if=vrl4.out of=/dev/sdx bs=512 + */ +asmlinkage void mmc_loader(unsigned short *buf, unsigned long len) +{ + int high_capacity; + + mmc_init_progress(); + + mmc_update_progress(MMC_PROGRESS_ENTER); + /* Initialise SDHI1 */ + /* PORT184CR: GPIO_FN_SDHICMD1 Control */ + __raw_writeb(CR_FUNCTION1, PORT184CR); + /* PORT179CR: GPIO_FN_SDHICLK1 Control */ + __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR); + /* PORT181CR: GPIO_FN_SDHID1_3 Control */ + __raw_writeb(CR_FUNCTION1, PORT183CR); + /* PORT182CR: GPIO_FN_SDHID1_2 Control */ + __raw_writeb(CR_FUNCTION1, PORT182CR); + /* PORT183CR: GPIO_FN_SDHID1_1 Control */ + __raw_writeb(CR_FUNCTION1, PORT181CR); + /* PORT180CR: GPIO_FN_SDHID1_0 Control */ + __raw_writeb(CR_FUNCTION1, PORT180CR); + + /* Enable clock to SDHI1 hardware block */ + __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3); + + /* setup SDHI hardware */ + mmc_update_progress(MMC_PROGRESS_INIT); + high_capacity = sdhi_boot_init(SDHI_BASE); + if (high_capacity < 0) + goto err; + + mmc_update_progress(MMC_PROGRESS_LOAD); + /* load kernel */ + if (sdhi_boot_do_read(SDHI_BASE, high_capacity, + 0, /* Kernel is at block 1 */ + (len + TMIO_BBS - 1) / TMIO_BBS, buf)) + goto err; + + /* Disable clock to SDHI1 hardware block */ + __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3); + + mmc_update_progress(MMC_PROGRESS_DONE); + + return; +err: + for(;;); +} diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c new file mode 100644 index 00000000000..bd3d4698095 --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.c @@ -0,0 +1,449 @@ +/* + * SuperH Mobile SDHI + * + * Copyright (C) 2010 Magnus Damm + * Copyright (C) 2010 Kuninori Morimoto + * Copyright (C) 2010 Simon Horman + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Parts inspired by u-boot + */ + +#include +#include +#include +#include +#include +#include +#include + +#define OCR_FASTBOOT (1<<29) +#define OCR_HCS (1<<30) +#define OCR_BUSY (1<<31) + +#define RESP_CMD12 0x00000030 + +static inline u16 sd_ctrl_read16(void __iomem *base, int addr) +{ + return __raw_readw(base + addr); +} + +static inline u32 sd_ctrl_read32(void __iomem *base, int addr) +{ + return __raw_readw(base + addr) | + __raw_readw(base + addr + 2) << 16; +} + +static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val) +{ + __raw_writew(val, base + addr); +} + +static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val) +{ + __raw_writew(val, base + addr); + __raw_writew(val >> 16, base + addr + 2); +} + +#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \ + TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \ + TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \ + TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \ + TMIO_STAT_ILL_FUNC) + +static int sdhi_intr(void __iomem *base) +{ + unsigned long state = sd_ctrl_read32(base, CTL_STATUS); + + if (state & ALL_ERROR) { + sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR); + sd_ctrl_write32(base, CTL_IRQ_MASK, + ALL_ERROR | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return -EINVAL; + } + if (state & TMIO_STAT_CMDRESPEND) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_CMDRESPEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + if (state & TMIO_STAT_RXRDY) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + if (state & TMIO_STAT_DATAEND) { + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND); + sd_ctrl_write32(base, CTL_IRQ_MASK, + TMIO_STAT_DATAEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + return 0; + } + + return -EAGAIN; +} + +static int sdhi_boot_wait_resp_end(void __iomem *base) +{ + int err = -EAGAIN, timeout = 10000000; + + while (timeout--) { + err = sdhi_intr(base); + if (err != -EAGAIN) + break; + udelay(1); + } + + return err; +} + +/* SDHI_CLK_CTRL */ +#define CLK_MMC_ENABLE (1 << 8) +#define CLK_MMC_INIT (1 << 6) /* clk / 256 */ + +static void sdhi_boot_mmc_clk_stop(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000); + msleep(10); + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE & + sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); + msleep(10); +} + +static void sdhi_boot_mmc_clk_start(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE | + sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); + msleep(10); + sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE); + msleep(10); +} + +static void sdhi_boot_reset(void __iomem *base) +{ + sd_ctrl_write16(base, CTL_RESET_SD, 0x0000); + msleep(10); + sd_ctrl_write16(base, CTL_RESET_SD, 0x0001); + msleep(10); +} + +/* Set MMC clock / power. + * Note: This controller uses a simple divider scheme therefore it cannot + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as + * MMC wont run that fast, it has to be clocked at 12MHz which is the next + * slowest setting. 
+ */ +static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios) +{ + if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY) + return -EBUSY; + + if (ios->clock) + sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, + ios->clock | CLK_MMC_ENABLE); + + /* Power sequence - OFF -> ON -> UP */ + switch (ios->power_mode) { + case MMC_POWER_OFF: /* power down SD bus */ + sdhi_boot_mmc_clk_stop(base); + break; + case MMC_POWER_ON: /* power up SD bus */ + break; + case MMC_POWER_UP: /* start bus clock */ + sdhi_boot_mmc_clk_start(base); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0); + break; + case MMC_BUS_WIDTH_4: + sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0); + break; + } + + /* Let things settle. delay taken from winCE driver */ + udelay(140); + + return 0; +} + +/* These are the bitmasks the tmio chip requires to implement the MMC response + * types. Note that R1 and R6 are the same in this scheme. */ +#define RESP_NONE 0x0300 +#define RESP_R1 0x0400 +#define RESP_R1B 0x0500 +#define RESP_R2 0x0600 +#define RESP_R3 0x0700 +#define DATA_PRESENT 0x0800 +#define TRANSFER_READ 0x1000 + +static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd) +{ + int err, c = cmd->opcode; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: c |= RESP_NONE; break; + case MMC_RSP_R1: c |= RESP_R1; break; + case MMC_RSP_R1B: c |= RESP_R1B; break; + case MMC_RSP_R2: c |= RESP_R2; break; + case MMC_RSP_R3: c |= RESP_R3; break; + default: + return -EINVAL; + } + + /* No interrupts so this may not be cleared */ + sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); + + sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND | + sd_ctrl_read32(base, CTL_IRQ_MASK)); + sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg); + sd_ctrl_write16(base, CTL_SD_CMD, c); + + + sd_ctrl_write32(base, CTL_IRQ_MASK, + ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) & + sd_ctrl_read32(base, CTL_IRQ_MASK)); + + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE); + + return 0; +} + +static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity, + unsigned long block, unsigned short *buf) +{ + int err, i; + + /* CMD17 - Read */ + { + struct mmc_command cmd; + + cmd.opcode = MMC_READ_SINGLE_BLOCK | \ + TRANSFER_READ | DATA_PRESENT; + if (high_capacity) + cmd.arg = block; + else + cmd.arg = block * TMIO_BBS; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + sd_ctrl_write32(base, CTL_IRQ_MASK, + ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY | + TMIO_STAT_TXUNDERRUN) & + sd_ctrl_read32(base, CTL_IRQ_MASK)); + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS); + for (i = 0; i < TMIO_BBS / sizeof(*buf); i++) + *buf++ = sd_ctrl_read16(base, RESP_CMD12); + + err = sdhi_boot_wait_resp_end(base); + if (err) + return err; + + return 0; +} + +int sdhi_boot_do_read(void __iomem *base, int high_capacity, + unsigned long offset, unsigned short count, + unsigned short *buf) +{ + unsigned long i; + int err = 0; + + for (i = 0; i < count; i++) { + err = sdhi_boot_do_read_single(base, high_capacity, offset + i, + buf + (i * TMIO_BBS / + sizeof(*buf))); + if (err) + return err; + } + + return 0; +} + +#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34) + +int sdhi_boot_init(void __iomem *base) +{ + bool sd_v2 = false, sd_v1_0 = false; + unsigned short cid; + int err, high_capacity = 0; + + 
sdhi_boot_mmc_clk_stop(base); + sdhi_boot_reset(base); + + /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */ + { + struct mmc_ios ios; + ios.power_mode = MMC_POWER_ON; + ios.bus_width = MMC_BUS_WIDTH_1; + ios.clock = CLK_MMC_INIT; + err = sdhi_boot_mmc_set_ios(base, &ios); + if (err) + return err; + } + + /* CMD0 */ + { + struct mmc_command cmd; + msleep(1); + cmd.opcode = MMC_GO_IDLE_STATE; + cmd.arg = 0; + cmd.flags = MMC_RSP_NONE; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + msleep(2); + } + + /* CMD8 - Test for SD version 2 */ + { + struct mmc_command cmd; + cmd.opcode = SD_SEND_IF_COND; + cmd.arg = (VOLTAGES != 0) << 8 | 0xaa; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); /* Ignore error */ + if ((cmd.resp[0] & 0xff) == 0xaa) + sd_v2 = true; + } + + /* CMD55 - Get OCR (SD) */ + { + int timeout = 1000; + struct mmc_command cmd; + + cmd.arg = 0; + + do { + cmd.opcode = MMC_APP_CMD; + cmd.flags = MMC_RSP_R1; + cmd.arg = 0; + err = sdhi_boot_request(base, &cmd); + if (err) + break; + + cmd.opcode = SD_APP_OP_COND; + cmd.flags = MMC_RSP_R3; + cmd.arg = (VOLTAGES & 0xff8000); + if (sd_v2) + cmd.arg |= OCR_HCS; + cmd.arg |= OCR_FASTBOOT; + err = sdhi_boot_request(base, &cmd); + if (err) + break; + + msleep(1); + } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); + + if (!err && timeout) { + if (!sd_v2) + sd_v1_0 = true; + high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; + } + } + + /* CMD1 - Get OCR (MMC) */ + if (!sd_v2 && !sd_v1_0) { + int timeout = 1000; + struct mmc_command cmd; + + do { + cmd.opcode = MMC_SEND_OP_COND; + cmd.arg = VOLTAGES | OCR_HCS; + cmd.flags = MMC_RSP_R3; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + + msleep(1); + } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); + + if (!timeout) + return -EAGAIN; + + high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; + } + + /* CMD2 - Get CID */ + { + struct mmc_command cmd; + cmd.opcode = MMC_ALL_SEND_CID; + cmd.arg = 0; + cmd.flags = MMC_RSP_R2; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD3 + * MMC: Set the relative address + * SD: Get the relative address + * Also puts the card into the standby state + */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SET_RELATIVE_ADDR; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + cid = cmd.resp[0] >> 16; + } + + /* CMD9 - Get CSD */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SEND_CSD; + cmd.arg = cid << 16; + cmd.flags = MMC_RSP_R2; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD7 - Select the card */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SELECT_CARD; + //cmd.arg = rca << 16; + cmd.arg = cid << 16; + //cmd.flags = MMC_RSP_R1B; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + /* CMD16 - Set the block size */ + { + struct mmc_command cmd; + cmd.opcode = MMC_SET_BLOCKLEN; + cmd.arg = TMIO_BBS; + cmd.flags = MMC_RSP_R1; + err = sdhi_boot_request(base, &cmd); + if (err) + return err; + } + + return high_capacity; +} diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h new file mode 100644 index 00000000000..92eaa09f985 --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.h @@ -0,0 +1,11 @@ +#ifndef SDHI_MOBILE_H +#define SDHI_MOBILE_H + +#include + +int sdhi_boot_do_read(void __iomem *base, int high_capacity, + unsigned long offset, unsigned short count, + unsigned 
short *buf); +int sdhi_boot_init(void __iomem *base); + +#endif diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h new file mode 100644 index 00000000000..4a81b01f1e8 --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h @@ -0,0 +1,21 @@ +#ifndef SDHI_SH7372_H +#define SDHI_SH7372_H + +#define SDGENCNTA 0xfe40009c + +/* The countdown of SDGENCNTA is controlled by + * ZB3D2CLK which runs at 149.5MHz. + * That is 149.5ticks/us. Approximate this as 150ticks/us. + */ +static void udelay(int us) +{ + __raw_writel(us * 150, SDGENCNTA); + while(__raw_readl(SDGENCNTA)) ; +} + +static void msleep(int ms) +{ + udelay(ms * 1000); +} + +#endif diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h new file mode 100644 index 00000000000..0ec9e69f2c3 --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi.h @@ -0,0 +1,16 @@ +#ifndef SDHI_H +#define SDHI_H + +/************************************************** + * + * CPU specific settings + * + **************************************************/ + +#ifdef CONFIG_ARCH_SH7372 +#include "mach/sdhi-sh7372.h" +#else +#error "unsupported CPU." +#endif + +#endif /* SDHI_H */ -- cgit v1.2.3-18-g5258 From ac8b9c1ce094d43372d0259de08045ffee745a41 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 10:22:08 +0100 Subject: ARM: entry: prefetch/data abort helpers: convert to macros Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 85 ++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 50 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 90c62cd51ca..dbe9eb88d55 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -44,6 +44,37 @@ 9997: .endm + .macro pabt_helper + mov r0, r2 @ pass address of aborted instruction. +#ifdef MULTI_PABORT + ldr r4, .LCprocfns + mov lr, pc + ldr pc, [r4, #PROCESSOR_PABT_FUNC] +#else + bl CPU_PABORT_HANDLER +#endif + .endm + + .macro dabt_helper + + @ + @ Call the processor-specific abort handler: + @ + @ r2 - aborted context pc + @ r3 - aborted context cpsr + @ + @ The abort handler must return the aborted address in r0, and + @ the fault status register in r1. r9 must be preserved. + @ +#ifdef MULTI_DABORT + ldr r4, .LCprocfns + mov lr, pc + ldr pc, [r4, #PROCESSOR_DABT_FUNC] +#else + bl CPU_DABORT_HANDLER +#endif + .endm + #ifdef CONFIG_KPROBES .section .kprobes.text,"ax",%progbits #else @@ -159,22 +190,7 @@ __dabt_svc: tst r3, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT - @ - @ Call the processor-specific abort handler: - @ - @ r2 - aborted context pc - @ r3 - aborted context cpsr - @ - @ The abort handler must return the aborted address in r0, and - @ the fault status register in r1. r9 must be preserved. - @ -#ifdef MULTI_DABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_DABT_FUNC] -#else - bl CPU_DABORT_HANDLER -#endif + dabt_helper @ @ set desired IRQ state, then call main handler @@ -298,14 +314,7 @@ __pabt_svc: tst r3, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT - mov r0, r2 @ pass address of aborted instruction. 
-#ifdef MULTI_PABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_PABT_FUNC] -#else - bl CPU_PABORT_HANDLER -#endif + pabt_helper debug_entry r1 msr cpsr_c, r9 @ Maybe enable interrupts mov r2, sp @ regs @@ -401,23 +410,7 @@ ENDPROC(__pabt_svc) __dabt_usr: usr_entry kuser_cmpxchg_check - - @ - @ Call the processor-specific abort handler: - @ - @ r2 - aborted context pc - @ r3 - aborted context cpsr - @ - @ The abort handler must return the aborted address in r0, and - @ the fault status register in r1. - @ -#ifdef MULTI_DABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_DABT_FUNC] -#else - bl CPU_DABORT_HANDLER -#endif + dabt_helper @ @ IRQs on, then call the main handler @@ -682,15 +675,7 @@ ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: usr_entry - - mov r0, r2 @ pass address of aborted instruction. -#ifdef MULTI_PABORT - ldr r4, .LCprocfns - mov lr, pc - ldr pc, [r4, #PROCESSOR_PABT_FUNC] -#else - bl CPU_PABORT_HANDLER -#endif + pabt_helper debug_entry r1 enable_irq @ Enable interrupts mov r2, sp @ regs -- cgit v1.2.3-18-g5258 From 0402becef94c43bb2bb483653a5cee2fb5049764 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 15:46:08 +0100 Subject: ARM: entry: prefetch/data abort helpers: avoid corrupting r4 Replace r4 with ip for calling abort helpers - ip is allowed to be corrupted by called functions in the ABI, so it makes more sense to use such a register. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index dbe9eb88d55..6855f6dd72d 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -47,9 +47,9 @@ .macro pabt_helper mov r0, r2 @ pass address of aborted instruction. #ifdef MULTI_PABORT - ldr r4, .LCprocfns + ldr ip, .LCprocfns mov lr, pc - ldr pc, [r4, #PROCESSOR_PABT_FUNC] + ldr pc, [ip, #PROCESSOR_PABT_FUNC] #else bl CPU_PABORT_HANDLER #endif @@ -67,9 +67,9 @@ @ the fault status register in r1. r9 must be preserved. @ #ifdef MULTI_DABORT - ldr r4, .LCprocfns + ldr ip, .LCprocfns mov lr, pc - ldr pc, [r4, #PROCESSOR_DABT_FUNC] + ldr pc, [ip, #PROCESSOR_DABT_FUNC] #else bl CPU_DABORT_HANDLER #endif -- cgit v1.2.3-18-g5258 From be020f8618caa0670a2a5b5a5df79549520f7867 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 13:42:01 +0100 Subject: ARM: entry: abort-macro: specify registers to be used for macros Require all callers of abort macros to specify the registers to be used. This improves the documentation at the callsites as to which registers are being used by this assembly code. 
Signed-off-by: Russell King --- arch/arm/mm/abort-ev4t.S | 2 +- arch/arm/mm/abort-ev5t.S | 4 ++-- arch/arm/mm/abort-ev5tj.S | 4 ++-- arch/arm/mm/abort-ev6.S | 4 ++-- arch/arm/mm/abort-macro.S | 30 +++++++++++++++--------------- 5 files changed, 22 insertions(+), 22 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b6282548f92..9910123079c 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -22,7 +22,7 @@ ENTRY(v4t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b526c0..800e8d42d39 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -22,10 +22,10 @@ ENTRY(v5t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 mov pc, lr diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d601c8..bcb58d2fc11 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -25,9 +25,9 @@ ENTRY(v5tj_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa52214..ef526e702a5 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -35,12 +35,12 @@ ENTRY(v6_early_abort) bic r1, r1, #1 << 11 @ clear bit 11 of FSR tst r3, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort + do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 ldreq r3, [r2] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif - do_ldrd_abort + do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bfa51a..8d3b9f999d1 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -9,33 +9,33 @@ * */ - .macro do_thumb_abort - tst r3, #PSR_T_BIT + .macro do_thumb_abort, fsr, pc, psr, tmp + tst \psr, #PSR_T_BIT beq not_thumb - ldrh r3, [r2] @ Read aborted Thumb instruction - and r3, r3, # 0xfe00 @ Mask opcode field - cmp r3, # 0x5600 @ Is it ldrsb? - orreq r3, r3, #1 << 11 @ Set L-bit if yes - tst r3, #1 << 11 @ L = 0 -> write - orreq r1, r1, #1 << 11 @ yes. + ldrh \tmp, [\pc] @ Read aborted Thumb instruction + and \tmp, \tmp, # 0xfe00 @ Mask opcode field + cmp \tmp, # 0x5600 @ Is it ldrsb? + orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes + tst \tmp, #1 << 11 @ L = 0 -> write + orreq \psr, \psr, #1 << 11 @ yes. mov pc, lr not_thumb: .endm /* - * We check for the following insturction encoding for LDRD. + * We check for the following instruction encoding for LDRD. 
* - * [27:25] == 0 + * [27:25] == 000 * [7:4] == 1101 * [20] == 0 */ - .macro do_ldrd_abort - tst r3, #0x0e000000 @ [27:25] == 0 + .macro do_ldrd_abort, tmp, insn + tst \insn, #0x0e000000 @ [27:25] == 0 bne not_ldrd - and r2, r3, #0x000000f0 @ [7:4] == 1101 - cmp r2, #0x000000d0 + and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 + cmp \tmp, #0x000000d0 bne not_ldrd - tst r3, #1 << 20 @ [20] == 0 + tst \insn, #1 << 20 @ [20] == 0 moveq pc, lr not_ldrd: .endm -- cgit v1.2.3-18-g5258 From 198a0a927ab9c52a68297120ee6dd4e36a975b0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 13:51:44 +0100 Subject: ARM: entry: abort-macro: simplify do_ldrd_abort We can test bits 27:25 and 20 of the instruction at the same time; there's no need to separate out the check of bit 20. Signed-off-by: Russell King --- arch/arm/mm/abort-macro.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index 8d3b9f999d1..af97a10bc5e 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -30,12 +30,10 @@ not_thumb: * [20] == 0 */ .macro do_ldrd_abort, tmp, insn - tst \insn, #0x0e000000 @ [27:25] == 0 + tst \insn, #0x0e100000 @ [27:25,20] == 0 bne not_ldrd and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 cmp \tmp, #0x000000d0 - bne not_ldrd - tst \insn, #1 << 20 @ [20] == 0 moveq pc, lr not_ldrd: .endm -- cgit v1.2.3-18-g5258 From 1613cc1119ecdb1bdb950da53065e615e4c4b8db Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 10:57:57 +0100 Subject: ARM: entry: no need to increase preempt count for IRQ handlers irq_enter() and irq_exit() already take care of the preempt_count handling for interrupts, which increment and decrement the hardirq bits of the preempt count. So we can remove the preempt count handing in our IRQ entry/exit assembly, like x86 did some 9 years ago. 
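For reference, the generic helpers already provide all of this accounting.
A rough sketch (simplified, not an exact quote of hardirq.h of this era) of
what the C-level IRQ path relies on:

	#include <linux/hardirq.h>
	#include <linux/irq.h>

	/*
	 * Sketch only: irq_enter() raises the hardirq bits of the preempt
	 * count, irq_exit() lowers them again and may run softirqs, so the
	 * assembly glue has nothing left to do.
	 */
	static void example_irq_path(unsigned int irq)
	{
		irq_enter();			/* adds HARDIRQ_OFFSET */
		generic_handle_irq(irq);	/* the real handler */
		irq_exit();			/* removes it, may run softirqs */
	}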
Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 6855f6dd72d..1e5f387c70a 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -220,16 +220,12 @@ __irq_svc: #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif -#ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - add r7, r8, #1 @ increment it - str r7, [tsk, #TI_PREEMPT] -#endif irq_handler + #ifdef CONFIG_PREEMPT - str r8, [tsk, #TI_PREEMPT] @ restore preempt count + get_thread_info tsk + ldr r8, [tsk, #TI_PREEMPT] @ get preempt count ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 movne r0, #0 @ force flags to 0 @@ -432,23 +428,8 @@ __irq_usr: bl trace_hardirqs_off #endif - get_thread_info tsk -#ifdef CONFIG_PREEMPT - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - add r7, r8, #1 @ increment it - str r7, [tsk, #TI_PREEMPT] -#endif - irq_handler -#ifdef CONFIG_PREEMPT - ldr r0, [tsk, #TI_PREEMPT] - str r8, [tsk, #TI_PREEMPT] - teq r0, r7 - ARM( strne r0, [r0, -r0] ) - THUMB( movne r0, #0 ) - THUMB( strne r0, [r0] ) -#endif - + get_thread_info tsk mov why, #0 b ret_to_user_from_irq UNWIND(.fnend ) -- cgit v1.2.3-18-g5258 From fbab1c809467efe001194ab8bb17f0f451a17f97 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 16:57:50 +0100 Subject: ARM: entry: no need to check parent IRQ mask in IRQ handler return There's no point checking to see whether IRQs were masked in the parent context when returning from IRQ handling - the fact that we're handling an IRQ means that the parent context must have had IRQs unmasked. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 1e5f387c70a..fd42e667a81 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -234,8 +234,9 @@ __irq_svc: #endif ldr r4, [sp, #S_PSR] @ irqs are already disabled #ifdef CONFIG_TRACE_IRQFLAGS - tst r4, #PSR_I_BIT - bleq trace_hardirqs_on + @ The parent context IRQs must have been enabled to get here in + @ the first place, so there's no point checking the PSR I bit. + bl trace_hardirqs_on #endif svc_exit r4 @ return from exception UNWIND(.fnend ) -- cgit v1.2.3-18-g5258 From f12482c9393da2c1f5cb3217f29aa79c653dd980 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 22 Jun 2011 15:30:51 +0100 Subject: ARM: 6974/1: pmu: refactor reservation Currently, PMU platform_device reservation relies on some minor abuse of the platform_device::id field for determining the type of PMU. This is problematic for device tree based probing, where the ID cannot be controlled. This patch removes reliance on the id field, and depends on each PMU's platform driver to figure out which type it is. As all PMUs handled by the current platform_driver name "arm-pmu" are CPU PMUs, this convention is hardcoded. 
New PMU types can be supported through the use of {of,platform}_device_id tables Signed-off-by: Mark Rutland Acked-by: Jamie Iles Acked-by: Will Deacon Cc: Rob Herring Cc: Mathieu Desnoyers Signed-off-by: Russell King --- arch/arm/include/asm/pmu.h | 2 +- arch/arm/kernel/perf_event.c | 4 ++-- arch/arm/kernel/pmu.c | 33 +++++++++++++++++++-------------- 3 files changed, 22 insertions(+), 17 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 7544ce6b481..67c70a31a1b 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device); * a cookie. */ extern int -release_pmu(struct platform_device *pdev); +release_pmu(enum arm_pmu_type type); /** * init_pmu() - Initialise the PMU. diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d53c0abc4dd..a6c643f2d2c 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -435,7 +435,7 @@ armpmu_reserve_hardware(void) if (irq >= 0) free_irq(irq, NULL); } - release_pmu(pmu_device); + release_pmu(ARM_PMU_DEVICE_CPU); pmu_device = NULL; } @@ -454,7 +454,7 @@ armpmu_release_hardware(void) } armpmu->stop(); - release_pmu(pmu_device); + release_pmu(ARM_PMU_DEVICE_CPU); pmu_device = NULL; } diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c79eec1926..87942b931c6 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -25,36 +25,41 @@ static volatile long pmu_lock; static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; -static int __devinit pmu_device_probe(struct platform_device *pdev) +static int __devinit pmu_register(struct platform_device *pdev, + enum arm_pmu_type type) { - - if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { + if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { pr_warning("received registration request for unknown " - "device %d\n", pdev->id); + "device %d\n", type); return -EINVAL; } - if (pmu_devices[pdev->id]) + if (pmu_devices[type]) pr_warning("registering new PMU device type %d overwrites " - "previous registration!\n", pdev->id); + "previous registration!\n", type); else pr_info("registered new PMU device of type %d\n", - pdev->id); + type); - pmu_devices[pdev->id] = pdev; + pmu_devices[type] = pdev; return 0; } -static struct platform_driver pmu_driver = { +static int __devinit armpmu_device_probe(struct platform_device *pdev) +{ + return pmu_register(pdev, ARM_PMU_DEVICE_CPU); +} + +static struct platform_driver armpmu_driver = { .driver = { .name = "arm-pmu", }, - .probe = pmu_device_probe, + .probe = armpmu_device_probe, }; static int __init register_pmu_driver(void) { - return platform_driver_register(&pmu_driver); + return platform_driver_register(&armpmu_driver); } device_initcall(register_pmu_driver); @@ -77,11 +82,11 @@ reserve_pmu(enum arm_pmu_type device) EXPORT_SYMBOL_GPL(reserve_pmu); int -release_pmu(struct platform_device *pdev) +release_pmu(enum arm_pmu_type device) { - if (WARN_ON(pdev != pmu_devices[pdev->id])) + if (WARN_ON(!pmu_devices[device])) return -EINVAL; - clear_bit_unlock(pdev->id, &pmu_lock); + clear_bit_unlock(device, &pmu_lock); return 0; } EXPORT_SYMBOL_GPL(release_pmu); -- cgit v1.2.3-18-g5258 From ae0c3751ab08d3fe039d48935e9ad2c46711b23b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 22 Jun 2011 15:32:48 +0100 Subject: ARM: 6975/1: pmu: reject duplicate PMU registrations Currently, the PMU reservation framework allows for multiple PMUs of the same type to register themselves. 
This can lead to a bug with the sequence: register_pmu(pmu1); reserve_pmu(pmu_type); register_pmu(pmu2); release_pmu(pmu1); Here, pmu1 cannot be released, and pmu2 cannot be reserved. This patch modifies register_pmu to reject registrations where a PMU is already present, preventing this problem. PMUs which can have multiple instances should not use the PMU reservation framework. Signed-off-by: Mark Rutland Acked-by: Jamie Iles Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/pmu.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 87942b931c6..de6b1b0860c 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -34,13 +34,13 @@ static int __devinit pmu_register(struct platform_device *pdev, return -EINVAL; } - if (pmu_devices[type]) - pr_warning("registering new PMU device type %d overwrites " - "previous registration!\n", type); - else - pr_info("registered new PMU device of type %d\n", - type); + if (pmu_devices[type]) { + pr_warning("rejecting duplicate registration of PMU device " + "type %d.", type); + return -ENOSPC; + } + pr_info("registered new PMU device of type %d\n", type); pmu_devices[type] = pdev; return 0; } -- cgit v1.2.3-18-g5258 From e73c34c3d522a60d9f7b38a7683076362bad98f5 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 22 Jun 2011 15:33:55 +0100 Subject: ARM: 6976/1: pmu: add OF probing support This is based on an earlier patch from Rob Herring > Add OF match table to enable OF style driver binding. The dts entry is like > this: > > pmu { > compatible = "arm,cortex-a9-pmu"; > interrupts = <100 101>; > }; > > The use of pdev->id as an index breaks with OF device binding, so set the type > based on the OF compatible string. This modification sets the PMU hardware type based on data embedded in the binding, allowing easy addition of new PMU types in future. Support for new PMU types not provided by devicetree can be added later using platform_device_id tables in a similar fashion. 
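As a purely hypothetical illustration of such a future addition (the names
below are made up and are not part of this patch), a new PMU type would only
need a new enum arm_pmu_type value plus one entry reusing the OF_MATCH_PMU()
helper introduced in pmu.c below:

	/* assumes a new ARM_PMU_DEVICE_L2CC value in enum arm_pmu_type */
	#define OF_MATCH_L2CC(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_L2CC)

	static struct of_device_id armpmu_of_device_ids[] = {
		OF_MATCH_CPU("arm,cortex-a9-pmu"),
		OF_MATCH_CPU("arm,cortex-a8-pmu"),
		OF_MATCH_CPU("arm,arm1136-pmu"),
		OF_MATCH_CPU("arm,arm1176-pmu"),
		OF_MATCH_L2CC("vendor,example-l2cc-pmu"),	/* hypothetical */
		{},
	};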
Signed-off-by: Mark Rutland Acked-by: Jamie Iles Acked-by: Rob Herring Cc: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/pmu.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index de6b1b0860c..2ce00be697e 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -45,14 +46,45 @@ static int __devinit pmu_register(struct platform_device *pdev, return 0; } +#define OF_MATCH_PMU(_name, _type) { \ + .compatible = _name, \ + .data = (void *)_type, \ +} + +#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU) + +static struct of_device_id armpmu_of_device_ids[] = { + OF_MATCH_CPU("arm,cortex-a9-pmu"), + OF_MATCH_CPU("arm,cortex-a8-pmu"), + OF_MATCH_CPU("arm,arm1136-pmu"), + OF_MATCH_CPU("arm,arm1176-pmu"), + {}, +}; + +enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + + /* provided by of_device_id table */ + if (pdev->dev.of_node) { + of_id = of_match_device(armpmu_of_device_ids, &pdev->dev); + BUG_ON(!of_id); + return (enum arm_pmu_type)of_id->data; + } + + /* Provided by a 'legacy' platform_device */ + return ARM_PMU_DEVICE_CPU; +} + static int __devinit armpmu_device_probe(struct platform_device *pdev) { - return pmu_register(pdev, ARM_PMU_DEVICE_CPU); + return pmu_register(pdev, armpmu_device_type(pdev)); } static struct platform_driver armpmu_driver = { .driver = { .name = "arm-pmu", + .of_match_table = armpmu_of_device_ids, }, .probe = armpmu_device_probe, }; -- cgit v1.2.3-18-g5258 From e4b6381009d740bd3a97e6b841d8efe7fc70c1b7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 22 Jun 2011 15:34:56 +0100 Subject: ARM: 6977/1: pmu: add platform_device_id table support This patch adds support for platform_device_id tables, allowing new PMU types to be registered with the correct type, without requiring new platform_driver shims to provide the type. An single entry for existing devices is provided. Macros matching functionality of the of_device_id table macros are provided for convenience. 
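A minimal sketch of the board-file side that this name-based matching serves
(the IRQ number is a placeholder, not taken from any real SoC):

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource example_pmu_irq = {
		.start	= 42,		/* placeholder PMU interrupt */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	};

	static int __init example_board_pmu_init(void)
	{
		struct platform_device *pdev;

		/* matched by name against PLAT_MATCH_CPU("arm-pmu") above */
		pdev = platform_device_register_simple("arm-pmu", -1,
						       &example_pmu_irq, 1);
		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}
	device_initcall(example_board_pmu_init);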
Signed-off-by: Mark Rutland Acked-by: Jamie Iles Cc: Rob Herring Cc: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/pmu.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2ce00be697e..2b70709376c 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -61,9 +61,22 @@ static struct of_device_id armpmu_of_device_ids[] = { {}, }; +#define PLAT_MATCH_PMU(_name, _type) { \ + .name = _name, \ + .driver_data = _type, \ +} + +#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU) + +static struct platform_device_id armpmu_plat_device_ids[] = { + PLAT_MATCH_CPU("arm-pmu"), + {}, +}; + enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) { const struct of_device_id *of_id; + const struct platform_device_id *pdev_id; /* provided by of_device_id table */ if (pdev->dev.of_node) { @@ -72,8 +85,10 @@ enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) return (enum arm_pmu_type)of_id->data; } - /* Provided by a 'legacy' platform_device */ - return ARM_PMU_DEVICE_CPU; + /* Provided by platform_device_id table */ + pdev_id = platform_get_device_id(pdev); + BUG_ON(!pdev_id); + return pdev_id->driver_data; } static int __devinit armpmu_device_probe(struct platform_device *pdev) @@ -87,6 +102,7 @@ static struct platform_driver armpmu_driver = { .of_match_table = armpmu_of_device_ids, }, .probe = armpmu_device_probe, + .id_table = armpmu_plat_device_ids, }; static int __init register_pmu_driver(void) -- cgit v1.2.3-18-g5258 From 46e130d298a384b677426e19faec311749ff2677 Mon Sep 17 00:00:00 2001 From: Jean Pihet Date: Wed, 29 Jun 2011 18:40:23 +0200 Subject: ARM: pm: omap3: run the ASM sleep code from DDR Most of the ASM sleep code (in arch/arm/mach-omap2/sleep34xx.S) is copied to internal SRAM at boot and after wake-up from CORE OFF mode. However only a small part of the code really needs to run from internal SRAM. This fix lets most of the ASM idle code run from the DDR in order to minimize the SRAM usage and the overhead in the code copy. The only pieces of code that are mandatory in SRAM are: - the i443 erratum WA, - the i581 erratum WA, - the security extension code. SRAM usage: - original code: . 560 bytes for omap3_sram_configure_core_dpll (used by DVFS), . 852 bytes for omap_sram_idle (used by suspend/resume in RETention), . 124 bytes for es3_sdrc_fix (used by suspend/resume in OFF mode on ES3.x), . 108 bytes for save_secure_ram_context (used on HS parts only). With this fix the usage for suspend/resume in RETention goes down 288 bytes, so the gain in SRAM usage for suspend/resume is 564 bytes. Also fixed the SRAM initialization sequence to avoid an unnecessary copy to SRAM at boot time and for readability. Tested on Beagleboard (ES2.x) in idle with full RET and OFF modes. 
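For orientation, an illustrative C rendering (a sketch only, not code from
this patch) of the dispatch that omap34xx_cpu_suspend now performs:

	/* both symbols are provided by this patch (pm.h / sleep34xx.S) */
	extern void (*omap3_do_wfi_sram)(void);	/* copy in internal SRAM */
	extern void omap3_do_wfi(void);		/* copy left in SDRAM */

	static void omap3_idle_dispatch_sketch(int save_state)
	{
		if (save_state == 0) {
			/* non-OFF modes: WFI from SRAM (erratum i581 WA) */
			omap3_do_wfi_sram();
			return;
		}
		/*
		 * OFF modes: the assembly first saves the CPU context
		 * (save_context_wfi) and then falls through to the SDRAM
		 * copy; the context-save step is elided in this sketch.
		 */
		omap3_do_wfi();
	}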
Kevin Hilman tested retention and off on 3430/n900, 3530/Overo and 3630/Zoom3 Signed-off-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/mach-omap2/pm.h | 20 ++- arch/arm/mach-omap2/pm34xx.c | 20 +-- arch/arm/mach-omap2/sleep34xx.S | 303 +++++++++++++++++++++++----------------- arch/arm/plat-omap/sram.c | 15 +- 4 files changed, 206 insertions(+), 152 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 45bcfce7735..a4ec213e30c 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set); #define pm_dbg_regset_init(reg_set) do {} while (0); #endif /* CONFIG_PM_DEBUG */ +/* 24xx */ extern void omap24xx_idle_loop_suspend(void); +extern unsigned int omap24xx_idle_loop_suspend_sz; extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, void __iomem *sdrc_power); +extern unsigned int omap24xx_cpu_suspend_sz; + +/* 3xxx */ extern void omap34xx_cpu_suspend(u32 *addr, int save_state); -extern int save_secure_ram_context(u32 *addr); -extern void omap3_save_scratchpad_contents(void); -extern unsigned int omap24xx_idle_loop_suspend_sz; +/* omap3_do_wfi function pointer and size, for copy to SRAM */ +extern void omap3_do_wfi(void); +extern unsigned int omap3_do_wfi_sz; +/* ... and its pointer from SRAM after copy */ +extern void (*omap3_do_wfi_sram)(void); + +/* save_secure_ram_context function pointer and size, for copy to SRAM */ +extern int save_secure_ram_context(u32 *addr); extern unsigned int save_secure_ram_context_sz; -extern unsigned int omap24xx_cpu_suspend_sz; -extern unsigned int omap34xx_cpu_suspend_sz; + +extern void omap3_save_scratchpad_contents(void); #define PM_RTA_ERRATUM_i608 (1 << 0) #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 3e9a13e1ac5..e1c79bae767 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -78,9 +78,8 @@ struct power_state { static LIST_HEAD(pwrst_list); -static void (*_omap_sram_idle)(u32 *addr, int save_state); - static int (*_omap_save_secure_sram)(u32 *addr); +void (*omap3_do_wfi_sram)(void); static struct powerdomain *mpu_pwrdm, *neon_pwrdm; static struct powerdomain *core_pwrdm, *per_pwrdm; @@ -309,7 +308,7 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) static void omap34xx_do_sram_idle(unsigned long save_state) { - _omap_sram_idle(omap3_arm_context, save_state); + omap34xx_cpu_suspend(omap3_arm_context, save_state); } void omap_sram_idle(void) @@ -328,9 +327,6 @@ void omap_sram_idle(void) int core_prev_state, per_prev_state; u32 sdrc_pwr = 0; - if (!_omap_sram_idle) - return; - pwrdm_clear_all_prev_pwrst(mpu_pwrdm); pwrdm_clear_all_prev_pwrst(neon_pwrdm); pwrdm_clear_all_prev_pwrst(core_pwrdm); @@ -826,10 +822,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) return 0; } +/* + * Push functions to SRAM + * + * The minimum set of functions is pushed to SRAM for execution: + * - omap3_do_wfi for erratum i581 WA, + * - save_secure_ram_context for security extensions. 
+ */ void omap_push_sram_idle(void) { - _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, - omap34xx_cpu_suspend_sz); + omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); + if (omap_type() != OMAP2_DEVICE_TYPE_GP) _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, save_secure_ram_context_sz); @@ -894,7 +897,6 @@ static int __init omap3_pm_init(void) per_clkdm = clkdm_lookup("per_clkdm"); core_clkdm = clkdm_lookup("core_clkdm"); - omap_push_sram_idle(); #ifdef CONFIG_SUSPEND suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index d18f52e46c7..17dbc5af691 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -139,8 +139,10 @@ ENTRY(save_secure_ram_context_sz) * * * Notes: - * - this code gets copied to internal SRAM at boot and after wake-up - * from OFF mode. The execution pointer in SRAM is _omap_sram_idle. + * - only the minimum set of functions gets copied to internal SRAM at boot + * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function + * pointers in SDRAM or SRAM are called depending on the desired low power + * target state. * - when the OMAP wakes up it continues at different execution points * depending on the low power mode (non-OFF vs OFF modes), * cf. 'Resume path for xxx mode' comments. @@ -158,9 +160,15 @@ ENTRY(omap34xx_cpu_suspend) * 3 - Both L1 and L2 lost and logic lost */ - /* Directly jump to WFI is the context save is not required */ - cmp r1, #0x0 - beq omap3_do_wfi + /* + * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi) + * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram) + */ + ldr r4, omap3_do_wfi_sram_addr + ldr r5, [r4] + cmp r1, #0x0 @ If no context save required, + bxeq r5 @ jump to the WFI code in SRAM + /* Otherwise fall through to the save context code */ save_context_wfi: @@ -213,7 +221,32 @@ save_context_wfi: THUMB( nop ) .arm -omap3_do_wfi: + b omap3_do_wfi + +/* + * Local variables + */ +omap3_do_wfi_sram_addr: + .word omap3_do_wfi_sram +kernel_flush: + .word v7_flush_dcache_all + +/* =================================== + * == WFI instruction => Enter idle == + * =================================== + */ + +/* + * Do WFI instruction + * Includes the resume path for non-OFF modes + * + * This code gets copied to internal SRAM and is accessible + * from both SDRAM and SRAM: + * - executed from SRAM for non-off modes (omap3_do_wfi_sram), + * - executed from SDRAM for OFF mode (omap3_do_wfi). + */ + .align 3 +ENTRY(omap3_do_wfi) ldr r4, sdrc_power @ read the SDRC_POWER register ldr r5, [r4] @ read the contents of SDRC_POWER orr r5, r5, #0x40 @ enable self refresh on idle req @@ -245,8 +278,86 @@ omap3_do_wfi: nop nop nop - bl wait_sdrc_ok +/* + * This function implements the erratum ID i581 WA: + * SDRC state restore before accessing the SDRAM + * + * Only used at return from non-OFF mode. For OFF + * mode the ROM code configures the SDRC and + * the DPLL before calling the restore code directly + * from DDR. + */ + +/* Make sure SDRC accesses are ok */ +wait_sdrc_ok: + +/* DPLL3 must be locked before accessing the SDRC. 
Maybe the HW ensures this */ + ldr r4, cm_idlest_ckgen +wait_dpll3_lock: + ldr r5, [r4] + tst r5, #1 + beq wait_dpll3_lock + + ldr r4, cm_idlest1_core +wait_sdrc_ready: + ldr r5, [r4] + tst r5, #0x2 + bne wait_sdrc_ready + /* allow DLL powerdown upon hw idle req */ + ldr r4, sdrc_power + ldr r5, [r4] + bic r5, r5, #0x40 + str r5, [r4] + +/* + * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a + * base instead. + * Be careful not to clobber r7 when maintaing this code. + */ + +is_dll_in_lock_mode: + /* Is dll in lock mode? */ + ldr r4, sdrc_dlla_ctrl + ldr r5, [r4] + tst r5, #0x4 + bne exit_nonoff_modes @ Return if locked + /* wait till dll locks */ + adr r7, kick_counter +wait_dll_lock_timed: + ldr r4, wait_dll_lock_counter + add r4, r4, #1 + str r4, [r7, #wait_dll_lock_counter - kick_counter] + ldr r4, sdrc_dlla_status + /* Wait 20uS for lock */ + mov r6, #8 +wait_dll_lock: + subs r6, r6, #0x1 + beq kick_dll + ldr r5, [r4] + and r5, r5, #0x4 + cmp r5, #0x4 + bne wait_dll_lock + b exit_nonoff_modes @ Return when locked + + /* disable/reenable DLL if not locked */ +kick_dll: + ldr r4, sdrc_dlla_ctrl + ldr r5, [r4] + mov r6, r5 + bic r6, #(1<<3) @ disable dll + str r6, [r4] + dsb + orr r6, r6, #(1<<3) @ enable dll + str r6, [r4] + dsb + ldr r4, kick_counter + add r4, r4, #1 + str r4, [r7] @ kick_counter + b wait_dll_lock_timed + +exit_nonoff_modes: + /* Re-enable C-bit if needed */ mrc p15, 0, r0, c1, c0, 0 tst r0, #(1 << 2) @ Check C bit enabled? orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared @@ -260,6 +371,31 @@ omap3_do_wfi: */ ldmfd sp!, {r4 - r11, pc} @ restore regs and return +/* + * Local variables + */ +sdrc_power: + .word SDRC_POWER_V +cm_idlest1_core: + .word CM_IDLEST1_CORE_V +cm_idlest_ckgen: + .word CM_IDLEST_CKGEN_V +sdrc_dlla_status: + .word SDRC_DLLA_STATUS_V +sdrc_dlla_ctrl: + .word SDRC_DLLA_CTRL_V + /* + * When exporting to userspace while the counters are in SRAM, + * these 2 words need to be at the end to facilitate retrival! + */ +kick_counter: + .word 0 +wait_dll_lock_counter: + .word 0 + +ENTRY(omap3_do_wfi_sz) + .word . - omap3_do_wfi + /* * ============================== @@ -275,13 +411,17 @@ omap3_do_wfi: * restore_es3: applies to 34xx >= ES3.0 * restore_3630: applies to 36xx * restore: common code for 3xxx + * + * Note: when back from CORE and MPU OFF mode we are running + * from SDRAM, without MMU, without the caches and prediction. + * Also the SRAM content has been cleared. 
*/ ENTRY(omap3_restore_es3) ldr r5, pm_prepwstst_core_p ldr r4, [r5] and r4, r4, #0x3 cmp r4, #0x0 @ Check if previous power state of CORE is OFF - bne omap3_restore + bne omap3_restore @ Fall through to OMAP3 common code adr r0, es3_sdrc_fix ldr r1, sram_base ldr r2, es3_sdrc_fix_sz @@ -293,7 +433,7 @@ copy_to_sram: bne copy_to_sram ldr r1, sram_base blx r1 - b omap3_restore + b omap3_restore @ Fall through to OMAP3 common code ENDPROC(omap3_restore_es3) ENTRY(omap3_restore_3630) @@ -301,7 +441,7 @@ ENTRY(omap3_restore_3630) ldr r2, [r1] and r2, r2, #0x3 cmp r2, #0x0 @ Check if previous power state of CORE is OFF - bne omap3_restore + bne omap3_restore @ Fall through to OMAP3 common code /* Disable RTA before giving control */ ldr r1, control_mem_rta mov r2, #OMAP36XX_RTA_DISABLE @@ -404,11 +544,32 @@ ENDPROC(omap3_restore) .ltorg +/* + * Local variables + */ +pm_prepwstst_core_p: + .word PM_PREPWSTST_CORE_P +pm_pwstctrl_mpu: + .word PM_PWSTCTRL_MPU_P +scratchpad_base: + .word SCRATCHPAD_BASE_P +sram_base: + .word SRAM_BASE_P + 0x8000 +control_stat: + .word CONTROL_STAT +control_mem_rta: + .word CONTROL_MEM_RTA_CTRL +l2dis_3630: + .word 0 + /* * Internal functions */ -/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */ +/* + * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 + * Copied to and run from SRAM in order to reconfigure the SDRC parameters. + */ .text .align 3 ENTRY(es3_sdrc_fix) @@ -438,6 +599,9 @@ ENTRY(es3_sdrc_fix) str r5, [r4] @ kick off refreshes bx lr +/* + * Local variables + */ .align sdrc_syscfg: .word SDRC_SYSCONFIG_P @@ -456,120 +620,3 @@ sdrc_manual_1: ENDPROC(es3_sdrc_fix) ENTRY(es3_sdrc_fix_sz) .word . - es3_sdrc_fix - -/* - * This function implements the erratum ID i581 WA: - * SDRC state restore before accessing the SDRAM - * - * Only used at return from non-OFF mode. For OFF - * mode the ROM code configures the SDRC and - * the DPLL before calling the restore code directly - * from DDR. - */ - -/* Make sure SDRC accesses are ok */ -wait_sdrc_ok: - -/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ - ldr r4, cm_idlest_ckgen -wait_dpll3_lock: - ldr r5, [r4] - tst r5, #1 - beq wait_dpll3_lock - - ldr r4, cm_idlest1_core -wait_sdrc_ready: - ldr r5, [r4] - tst r5, #0x2 - bne wait_sdrc_ready - /* allow DLL powerdown upon hw idle req */ - ldr r4, sdrc_power - ldr r5, [r4] - bic r5, r5, #0x40 - str r5, [r4] - -/* - * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a - * base instead. - * Be careful not to clobber r7 when maintaing this code. - */ - -is_dll_in_lock_mode: - /* Is dll in lock mode? 
*/ - ldr r4, sdrc_dlla_ctrl - ldr r5, [r4] - tst r5, #0x4 - bxne lr @ Return if locked - /* wait till dll locks */ - adr r7, kick_counter -wait_dll_lock_timed: - ldr r4, wait_dll_lock_counter - add r4, r4, #1 - str r4, [r7, #wait_dll_lock_counter - kick_counter] - ldr r4, sdrc_dlla_status - /* Wait 20uS for lock */ - mov r6, #8 -wait_dll_lock: - subs r6, r6, #0x1 - beq kick_dll - ldr r5, [r4] - and r5, r5, #0x4 - cmp r5, #0x4 - bne wait_dll_lock - bx lr @ Return when locked - - /* disable/reenable DLL if not locked */ -kick_dll: - ldr r4, sdrc_dlla_ctrl - ldr r5, [r4] - mov r6, r5 - bic r6, #(1<<3) @ disable dll - str r6, [r4] - dsb - orr r6, r6, #(1<<3) @ enable dll - str r6, [r4] - dsb - ldr r4, kick_counter - add r4, r4, #1 - str r4, [r7] @ kick_counter - b wait_dll_lock_timed - - .align -cm_idlest1_core: - .word CM_IDLEST1_CORE_V -cm_idlest_ckgen: - .word CM_IDLEST_CKGEN_V -sdrc_dlla_status: - .word SDRC_DLLA_STATUS_V -sdrc_dlla_ctrl: - .word SDRC_DLLA_CTRL_V -pm_prepwstst_core_p: - .word PM_PREPWSTST_CORE_P -pm_pwstctrl_mpu: - .word PM_PWSTCTRL_MPU_P -scratchpad_base: - .word SCRATCHPAD_BASE_P -sram_base: - .word SRAM_BASE_P + 0x8000 -sdrc_power: - .word SDRC_POWER_V -control_stat: - .word CONTROL_STAT -control_mem_rta: - .word CONTROL_MEM_RTA_CTRL -kernel_flush: - .word v7_flush_dcache_all -l2dis_3630: - .word 0 - /* - * When exporting to userspace while the counters are in SRAM, - * these 2 words need to be at the end to facilitate retrival! - */ -kick_counter: - .word 0 -wait_dll_lock_counter: - .word 0 -ENDPROC(omap34xx_cpu_suspend) - -ENTRY(omap34xx_cpu_suspend_sz) - .word . - omap34xx_cpu_suspend diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 6af3d0b1f8d..363c91e44ef 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -394,20 +394,15 @@ void omap3_sram_restore_context(void) } #endif /* CONFIG_PM */ -static int __init omap34xx_sram_init(void) -{ - _omap3_sram_configure_core_dpll = - omap_sram_push(omap3_sram_configure_core_dpll, - omap3_sram_configure_core_dpll_sz); - omap_push_sram_idle(); - return 0; -} -#else +#endif /* CONFIG_ARCH_OMAP3 */ + static inline int omap34xx_sram_init(void) { +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) + omap3_sram_restore_context(); +#endif return 0; } -#endif int __init omap_sram_init(void) { -- cgit v1.2.3-18-g5258 From 4d4d6fbb7c3125f17a4864215191e54b975cfb4f Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 29 Jun 2011 10:13:04 +0000 Subject: ARM: mach-shmobile: make a struct in board-ap4evb.c static struct soc_camera_link imx074_link in board-ap4evb.c doesn't have to be global. 
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-ap4evb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index f6b687f61c2..803bc6edfca 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -913,7 +913,7 @@ static struct i2c_board_info imx074_info = { I2C_BOARD_INFO("imx074", 0x1a), }; -struct soc_camera_link imx074_link = { +static struct soc_camera_link imx074_link = { .bus_id = 0, .board_info = &imx074_info, .i2c_adapter_id = 0, -- cgit v1.2.3-18-g5258 From cbe263497def23befb6f475977661bae5d1f82e4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 30 Jun 2011 08:45:49 +0100 Subject: ARM: pm: omap3: move saving of the auxiliary control registers to C Move the saving of the auxiliary control registers into C; there's no need for this to be in assembly code. This results in less assembly code to deal with in OMAP. Kevin tested full-chip retention and off on 3430/n900, 3530/Overo and 3630/Zoom3. Tested-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/mach-omap2/pm.h | 2 +- arch/arm/mach-omap2/pm34xx.c | 19 ++++++++++++++++++- arch/arm/mach-omap2/sleep34xx.S | 12 ++---------- 3 files changed, 21 insertions(+), 12 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index a4ec213e30c..04ee5664612 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -97,7 +97,7 @@ extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, extern unsigned int omap24xx_cpu_suspend_sz; /* 3xxx */ -extern void omap34xx_cpu_suspend(u32 *addr, int save_state); +extern void omap34xx_cpu_suspend(int save_state); /* omap3_do_wfi function pointer and size, for copy to SRAM */ extern void omap3_do_wfi(void); diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index e1c79bae767..7238a63e24e 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -306,9 +306,24 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) return IRQ_HANDLED; } +static void omap34xx_save_context(u32 *save) +{ + u32 val; + + /* Read Auxiliary Control Register */ + asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val)); + *save++ = 1; + *save++ = val; + + /* Read L2 AUX ctrl register */ + asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val)); + *save++ = 1; + *save++ = val; +} + static void omap34xx_do_sram_idle(unsigned long save_state) { - omap34xx_cpu_suspend(omap3_arm_context, save_state); + omap34xx_cpu_suspend(save_state); } void omap_sram_idle(void) @@ -408,6 +423,8 @@ void omap_sram_idle(void) * get saved. The rest is placed on the stack, and restored * from there before resuming. 
*/ + if (save_state) + omap34xx_save_context(omap3_arm_context); if (save_state == 1 || save_state == 3) cpu_suspend(save_state, omap34xx_do_sram_idle); else diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 17dbc5af691..f2ea1bd1c69 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -152,8 +152,7 @@ ENTRY(omap34xx_cpu_suspend) stmfd sp!, {r4 - r11, lr} @ save registers on stack /* - * r0 contains CPU context save/restore pointer in sdram - * r1 contains information about saving context: + * r0 contains information about saving context: * 0 - No context lost * 1 - Only L1 and logic lost * 2 - Only L2 lost (Even L1 is retained we clean it along with L2) @@ -166,19 +165,12 @@ ENTRY(omap34xx_cpu_suspend) */ ldr r4, omap3_do_wfi_sram_addr ldr r5, [r4] - cmp r1, #0x0 @ If no context save required, + cmp r0, #0x0 @ If no context save required, bxeq r5 @ jump to the WFI code in SRAM /* Otherwise fall through to the save context code */ save_context_wfi: - mov r8, r0 @ Store SDRAM address in r8 - mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register - mov r4, #0x1 @ Number of parameters for restore call - stmia r8!, {r4-r5} @ Push parameters for restore call - mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register - stmia r8!, {r4-r5} @ Push parameters for restore call - /* * jump out to kernel flush routine * - reuse that code is better -- cgit v1.2.3-18-g5258 From b059bdc39321696fe8f344acb7117d57fbd7b475 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 15:44:20 +0100 Subject: ARM: entry: rejig register allocation in exception entry handlers This allows us to avoid moving registers twice to work around the clobbered registers when we add calls to trace_hardirqs_{on,off}. Ensure that all SVC handlers return with SPSR in r5 for consistency. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 81 +++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 38 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index fd42e667a81..582bb231044 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -45,7 +45,7 @@ .endm .macro pabt_helper - mov r0, r2 @ pass address of aborted instruction. + mov r0, r4 @ pass address of aborted instruction. #ifdef MULTI_PABORT ldr ip, .LCprocfns mov lr, pc @@ -56,6 +56,8 @@ .endm .macro dabt_helper + mov r2, r4 + mov r3, r5 @ @ Call the processor-specific abort handler: @@ -157,26 +159,26 @@ ENDPROC(__und_invalid) SPFIX( subeq sp, sp, #4 ) stmia sp, {r1 - r12} - ldmia r0, {r1 - r3} - add r5, sp, #S_SP - 4 @ here for interlock avoidance - mov r4, #-1 @ "" "" "" "" - add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) - SPFIX( addeq r0, r0, #4 ) - str r1, [sp, #-4]! @ save the "real" r0 copied + ldmia r0, {r3 - r5} + add r7, sp, #S_SP - 4 @ here for interlock avoidance + mov r6, #-1 @ "" "" "" "" + add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) + SPFIX( addeq r2, r2, #4 ) + str r3, [sp, #-4]! 
@ save the "real" r0 copied @ from the exception stack - mov r1, lr + mov r3, lr @ @ We are now ready to fill in the remaining blanks on the stack: @ - @ r0 - sp_svc - @ r1 - lr_svc - @ r2 - lr_, already fixed up for correct return/restart - @ r3 - spsr_ - @ r4 - orig_r0 (see pt_regs definition in ptrace.h) + @ r2 - sp_svc + @ r3 - lr_svc + @ r4 - lr_, already fixed up for correct return/restart + @ r5 - spsr_ + @ r6 - orig_r0 (see pt_regs definition in ptrace.h) @ - stmia r5, {r0 - r4} + stmia r7, {r2 - r6} .endm .align 5 @@ -187,7 +189,7 @@ __dabt_svc: @ get ready to re-enable interrupts if appropriate @ mrs r9, cpsr - tst r3, #PSR_I_BIT + tst r5, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT dabt_helper @@ -208,8 +210,8 @@ __dabt_svc: @ @ restore SPSR and restart the instruction @ - ldr r2, [sp, #S_PSR] - svc_exit r2 @ return from exception + ldr r5, [sp, #S_PSR] + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__dabt_svc) @@ -232,13 +234,13 @@ __irq_svc: tst r0, #_TIF_NEED_RESCHED blne svc_preempt #endif - ldr r4, [sp, #S_PSR] @ irqs are already disabled + ldr r5, [sp, #S_PSR] #ifdef CONFIG_TRACE_IRQFLAGS @ The parent context IRQs must have been enabled to get here in @ the first place, so there's no point checking the PSR I bit. bl trace_hardirqs_on #endif - svc_exit r4 @ return from exception + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__irq_svc) @@ -273,15 +275,16 @@ __und_svc: @ r0 - instruction @ #ifndef CONFIG_THUMB2_KERNEL - ldr r0, [r2, #-4] + ldr r0, [r4, #-4] #else - ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 + ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 and r9, r0, #0xf800 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 - ldrhhs r9, [r2] @ bottom 16 bits + ldrhhs r9, [r4] @ bottom 16 bits orrhs r0, r9, r0, lsl #16 #endif adr r9, BSYM(1f) + mov r2, r4 bl call_fpe mov r0, sp @ struct pt_regs *regs @@ -295,8 +298,8 @@ __und_svc: @ @ restore SPSR and restart the instruction @ - ldr r2, [sp, #S_PSR] @ Get SVC cpsr - svc_exit r2 @ return from exception + ldr r5, [sp, #S_PSR] @ Get SVC cpsr + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__und_svc) @@ -308,7 +311,7 @@ __pabt_svc: @ re-enable interrupts if appropriate @ mrs r9, cpsr - tst r3, #PSR_I_BIT + tst r5, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT pabt_helper @@ -325,8 +328,8 @@ __pabt_svc: @ @ restore SPSR and restart the instruction @ - ldr r2, [sp, #S_PSR] - svc_exit r2 @ return from exception + ldr r5, [sp, #S_PSR] + svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__pabt_svc) @@ -357,23 +360,23 @@ ENDPROC(__pabt_svc) ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) - ldmia r0, {r1 - r3} + ldmia r0, {r3 - r5} add r0, sp, #S_PC @ here for interlock avoidance - mov r4, #-1 @ "" "" "" "" + mov r6, #-1 @ "" "" "" "" - str r1, [sp] @ save the "real" r0 copied + str r3, [sp] @ save the "real" r0 copied @ from the exception stack @ @ We are now ready to fill in the remaining blanks on the stack: @ - @ r2 - lr_, already fixed up for correct return/restart - @ r3 - spsr_ - @ r4 - orig_r0 (see pt_regs definition in ptrace.h) + @ r4 - lr_, already fixed up for correct return/restart + @ r5 - spsr_ + @ r6 - orig_r0 (see pt_regs definition in ptrace.h) @ @ Also, separately save sp_usr and lr_usr @ - stmia r0, {r2 - r4} + stmia r0, {r4 - r6} ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) @@ -397,7 +400,7 @@ ENDPROC(__pabt_svc) @ if it was interrupted in a critical region. 
Here we @ perform a quick test inline since it should be false @ 99.9999% of the time. The rest is done out of line. - cmp r2, #TASK_SIZE + cmp r4, #TASK_SIZE blhs kuser_cmpxchg_fixup #endif #endif @@ -441,6 +444,8 @@ ENDPROC(__irq_usr) .align 5 __und_usr: usr_entry + mov r2, r4 + mov r3, r5 @ @ fall through to the emulation code, which returns using r9 if @@ -894,13 +899,13 @@ __kuser_cmpxchg: @ 0xffff0fc0 .text kuser_cmpxchg_fixup: @ Called from kuser_cmpxchg_check macro. - @ r2 = address of interrupted insn (must be preserved). + @ r4 = address of interrupted insn (must be preserved). @ sp = saved regs. r7 and r8 are clobbered. @ 1b = first critical insn, 2b = last critical insn. - @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. + @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. mov r7, #0xffff0fff sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) - subs r8, r2, r7 + subs r8, r4, r7 rsbcss r8, r8, #(2b - 1b) strcs r7, [sp, #S_PC] mov pc, lr -- cgit v1.2.3-18-g5258 From 8b4186160b7894ca4583f702a562856d5d9e9118 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 19:25:02 +0100 Subject: ARM: entry: prefetch abort helper: pass aborted pc in r4 rather than r0 This avoids unnecessary instructions for CPUs which implement the IFAR (instruction fault address register). Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 2 +- arch/arm/mm/pabort-legacy.S | 3 ++- arch/arm/mm/pabort-v6.S | 3 ++- arch/arm/mm/pabort-v7.S | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 582bb231044..d644d0240ad 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -45,7 +45,7 @@ .endm .macro pabt_helper - mov r0, r4 @ pass address of aborted instruction. 
+ @ PABORT handler takes fault address in r4 #ifdef MULTI_PABORT ldr ip, .LCprocfns mov lr, pc diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eba88e..8a5d8aaf2d5 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S @@ -4,7 +4,7 @@ /* * Function: legacy_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = Simulated IFSR with section translation fault status @@ -14,6 +14,7 @@ .align 5 ENTRY(legacy_pabort) + mov r0, r4 mov r1, #5 mov pc, lr ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1ef211..eaac1cb7c4c 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S @@ -4,7 +4,7 @@ /* * Function: v6_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = IFSR @@ -14,6 +14,7 @@ .align 5 ENTRY(v6_pabort) + mov r0, r4 mrc p15, 0, r1, c5, c0, 1 @ get IFSR mov pc, lr ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b300a18..b515e0b059b 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S @@ -4,7 +4,7 @@ /* * Function: v6_pabort * - * Params : r0 = address of aborted instruction + * Params : r4 = address of aborted instruction * * Returns : r0 = address of abort * : r1 = IFSR -- cgit v1.2.3-18-g5258 From 29cb3cd208dd0e4471bb80bec4facc49ceb199fa Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 2 Jul 2011 09:54:01 +0100 Subject: ARM: pm: allow suspend finisher to return error codes There are SoCs where attempting to enter a low power state is ignored, and the CPU continues executing instructions with all state preserved. It is over-complex at that point to disable the MMU just to call the resume path. Instead, allow the suspend finisher to return error codes to abort suspend in this circumstance, where the cpu_suspend internals will then unwind the saved state on the stack. Also omit the tlb flush as no changes to the page tables will have happened. Signed-off-by: Russell King --- arch/arm/include/asm/suspend.h | 9 +++++---- arch/arm/kernel/sleep.S | 11 +++++++++-- arch/arm/mach-exynos4/pm.c | 2 +- arch/arm/mach-omap2/pm34xx.c | 3 ++- arch/arm/mach-pxa/include/mach/pm.h | 4 ++-- arch/arm/mach-pxa/pxa3xx.c | 2 +- arch/arm/mach-s3c2412/pm.c | 4 +++- arch/arm/mach-s3c2416/pm.c | 4 +++- arch/arm/mach-s3c64xx/pm.c | 2 +- arch/arm/mach-sa1100/pm.c | 2 +- arch/arm/plat-samsung/include/plat/pm.h | 4 ++-- arch/arm/plat-samsung/pm.c | 2 +- 12 files changed, 31 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index f8db9d096bc..b0e4e1a0231 100644 --- a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h @@ -10,12 +10,13 @@ extern void cpu_resume(void); * Hide the first two arguments to __cpu_suspend - these are an implementation * detail which platform code shouldn't have to know about. 
*/ -static inline void cpu_suspend(unsigned long arg, void (*fn)(unsigned long)) +static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { - extern void __cpu_suspend(int, long, unsigned long, - void (*)(unsigned long)); - __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); + extern int __cpu_suspend(int, long, unsigned long, + int (*)(unsigned long)); + int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); flush_tlb_all(); + return ret; } #endif diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index c156d0e5f45..dc902f2c684 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -12,7 +12,6 @@ * r1 = v:p offset * r2 = suspend function arg0 * r3 = suspend function - * Note: does not return until system resumes */ ENTRY(__cpu_suspend) stmfd sp!, {r4 - r11, lr} @@ -26,7 +25,7 @@ ENTRY(__cpu_suspend) #endif mov r6, sp @ current virtual SP sub sp, sp, r5 @ allocate CPU state on stack - mov r0, sp @ save pointer + mov r0, sp @ save pointer to CPU save block add ip, ip, r1 @ convert resume fn to phys stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn ldr r5, =sleep_save_sp @@ -55,10 +54,17 @@ ENTRY(__cpu_suspend) #else bl __cpuc_flush_kern_all #endif + adr lr, BSYM(cpu_suspend_abort) ldmfd sp!, {r0, pc} @ call suspend fn ENDPROC(__cpu_suspend) .ltorg +cpu_suspend_abort: + ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn + mov sp, r2 + ldmfd sp!, {r4 - r11, pc} +ENDPROC(cpu_suspend_abort) + /* * r0 = control register value * r1 = v:p offset (preserved by cpu_do_resume) @@ -89,6 +95,7 @@ cpu_resume_after_mmu: str r5, [r2, r4, lsl #2] @ restore old mapping mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache bl cpu_init @ restore the und/abt/irq banked regs + mov r0, #0 @ return zero on success ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 5c01c607664..533c28f758c 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c @@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = { SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), }; -void exynos4_cpu_suspend(unsigned long arg) +static int exynos4_cpu_suspend(unsigned long arg) { unsigned long tmp; unsigned long mask = 0xFFFFFFFF; diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 7238a63e24e..b77d82665ab 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -321,9 +321,10 @@ static void omap34xx_save_context(u32 *save) *save++ = val; } -static void omap34xx_do_sram_idle(unsigned long save_state) +static int omap34xx_do_sram_idle(unsigned long save_state) { omap34xx_cpu_suspend(save_state); + return 0; } void omap_sram_idle(void) diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h index a566720527c..51558bcee99 100644 --- a/arch/arm/mach-pxa/include/mach/pm.h +++ b/arch/arm/mach-pxa/include/mach/pm.h @@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns { extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; /* sleep.S */ -extern void pxa25x_finish_suspend(unsigned long); -extern void pxa27x_finish_suspend(unsigned long); +extern int pxa25x_finish_suspend(unsigned long); +extern int pxa27x_finish_suspend(unsigned long); extern int pxa_pm_enter(suspend_state_t state); extern int pxa_pm_prepare(void); diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 9fe947b5d5f..ef1c56a67af 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -148,7 +148,7 @@ static void 
pxa3xx_cpu_pm_suspend(void) asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); #endif - extern void pxa3xx_finish_suspend(unsigned long); + extern int pxa3xx_finish_suspend(unsigned long); /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index 9a1fb898db5..f4077efa51f 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c @@ -37,7 +37,7 @@ extern void s3c2412_sleep_enter(void); -static void s3c2412_cpu_suspend(unsigned long arg) +static int s3c2412_cpu_suspend(unsigned long arg) { unsigned long tmp; @@ -48,6 +48,8 @@ static void s3c2412_cpu_suspend(unsigned long arg) __raw_writel(tmp, S3C2412_PWRCFG); s3c2412_sleep_enter(); + + panic("sleep resumed to originator?"); } static void s3c2412_pm_prepare(void) diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 9e67a2a07a8..9ec54f1d8e7 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c @@ -24,7 +24,7 @@ extern void s3c2412_sleep_enter(void); -static void s3c2416_cpu_suspend(unsigned long arg) +static int s3c2416_cpu_suspend(unsigned long arg) { /* enable wakeup sources regardless of battery state */ __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); @@ -33,6 +33,8 @@ static void s3c2416_cpu_suspend(unsigned long arg) __raw_writel(0x2BED, S3C2443_PWRMODE); s3c2412_sleep_enter(); + + panic("sleep resumed to originator?"); } static void s3c2416_pm_prepare(void) diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index 7cc1879af72..8bad6437068 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -112,7 +112,7 @@ void s3c_pm_save_core(void) * this. */ -static void s3c64xx_cpu_suspend(unsigned long arg) +static int s3c64xx_cpu_suspend(unsigned long arg) { unsigned long tmp; diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index cf9a1e9fb70..bf85b8b259d 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -33,7 +33,7 @@ #include #include -extern void sa1100_finish_suspend(unsigned long); +extern int sa1100_finish_suspend(unsigned long); #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 0a5b7faca83..f6749916d19 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h @@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow; /* per-cpu sleep functions */ extern void (*pm_cpu_prep)(void); -extern void (*pm_cpu_sleep)(unsigned long); +extern int (*pm_cpu_sleep)(unsigned long); /* Flags for PM Control */ @@ -54,7 +54,7 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */ extern void s3c_cpu_resume(void); -extern void s3c2410_cpu_suspend(unsigned long); +extern int s3c2410_cpu_suspend(unsigned long); /* sleep save info */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 69d6b040a01..5fa1742d019 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -232,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start, void (*pm_cpu_prep)(void); -void (*pm_cpu_sleep)(unsigned long); +int (*pm_cpu_sleep)(unsigned long); #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) -- cgit v1.2.3-18-g5258 From 02fe2845d6a837ab02f0738f6cf4591a02cc88d4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 11:44:06 
+0100 Subject: ARM: entry: avoid enabling interrupts in prefetch/data abort handlers Avoid enabling interrupts if the parent context had interrupts enabled in the abort handler assembly code, and move this into the breakpoint/ page/alignment fault handlers instead. This gets rid of some special-casing for the breakpoint fault handlers from the low level abort handler path. Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 43 ++++++++++++++++++----------------------- arch/arm/kernel/entry-header.S | 19 ------------------ arch/arm/kernel/hw_breakpoint.c | 12 +++++------- arch/arm/mm/alignment.c | 3 +++ arch/arm/mm/fault.c | 4 ++++ 5 files changed, 31 insertions(+), 50 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d644d0240ad..c46bafa2f6d 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -185,20 +185,15 @@ ENDPROC(__und_invalid) __dabt_svc: svc_entry - @ - @ get ready to re-enable interrupts if appropriate - @ - mrs r9, cpsr - tst r5, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif dabt_helper @ - @ set desired IRQ state, then call main handler + @ call main handler @ - debug_entry r1 - msr cpsr_c, r9 mov r2, sp bl do_DataAbort @@ -211,6 +206,12 @@ __dabt_svc: @ restore SPSR and restart the instruction @ ldr r5, [sp, #S_PSR] +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__dabt_svc) @@ -307,16 +308,11 @@ ENDPROC(__und_svc) __pabt_svc: svc_entry - @ - @ re-enable interrupts if appropriate - @ - mrs r9, cpsr - tst r5, #PSR_I_BIT - biceq r9, r9, #PSR_I_BIT +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif pabt_helper - debug_entry r1 - msr cpsr_c, r9 @ Maybe enable interrupts mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler @@ -329,6 +325,12 @@ __pabt_svc: @ restore SPSR and restart the instruction @ ldr r5, [sp, #S_PSR] +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__pabt_svc) @@ -412,11 +414,6 @@ __dabt_usr: kuser_cmpxchg_check dabt_helper - @ - @ IRQs on, then call the main handler - @ - debug_entry r1 - enable_irq mov r2, sp adr lr, BSYM(ret_from_exception) b do_DataAbort @@ -663,8 +660,6 @@ ENDPROC(__und_usr_unknown) __pabt_usr: usr_entry pabt_helper - debug_entry r1 - enable_irq @ Enable interrupts mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler UNWIND(.fnend ) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 051166c2a93..4d6ad8348e8 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -165,25 +165,6 @@ .endm #endif /* !CONFIG_THUMB2_KERNEL */ - @ - @ Debug exceptions are taken as prefetch or data aborts. - @ We must disable preemption during the handler so that - @ we can access the debug registers safely. 
- @ - .macro debug_entry, fsr -#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT) - ldr r4, =0x40f @ mask out fsr.fs - and r5, r4, \fsr - cmp r5, #2 @ debug exception - bne 1f - get_thread_info r10 - ldr r6, [r10, #TI_PREEMPT] @ get preempt count - add r11, r6, #1 @ increment it - str r11, [r10, #TI_PREEMPT] -1: -#endif - .endm - /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - r0 to r6. diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 87acc25d7a3..a927ca1f556 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -796,7 +796,7 @@ unlock: /* * Called from either the Data Abort Handler [watchpoint] or the - * Prefetch Abort Handler [breakpoint] with preemption disabled. + * Prefetch Abort Handler [breakpoint] with interrupts disabled. */ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, int ret = 0; u32 dscr; - /* We must be called with preemption disabled. */ - WARN_ON(preemptible()); + preempt_disable(); + + if (interrupts_enabled(regs)) + local_irq_enable(); /* We only handle watchpoints and hardware breakpoints. */ ARM_DBG_READ(c1, 0, dscr); @@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, ret = 1; /* Unhandled fault. */ } - /* - * Re-enable preemption after it was disabled in the - * low-level exception handling code. - */ preempt_enable(); return ret; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3bce72..be7c638b648 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) int isize = 4; int thumb2_32b = 0; + if (interrupts_enabled(regs)) + local_irq_enable(); + instrptr = instruction_pointer(regs); fs = get_fs(); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3..20e5d512060 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) tsk = current; mm = tsk->mm; + /* Enable interrupts if they were enabled in the parent context. */ + if (interrupts_enabled(regs)) + local_irq_enable(); + /* * If we're in an interrupt or have no user * context, we must not take the fault.. -- cgit v1.2.3-18-g5258 From df295df6c391e322a06dea0d2bc3d22debd15fb9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 16:55:58 +0100 Subject: ARM: entry: instrument svc undefined exception handler with irqtrace Add irqtrace function calls to the undefined exception handler, so that we get sane lockdep traces from locking problems in undefined exception handlers. 
Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index c46bafa2f6d..920dd3d0795 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -268,6 +268,10 @@ __und_svc: svc_entry #endif +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif + @ @ call emulation code, which returns using r9 if it has emulated @ the instruction, or the more conventional lr if we are to treat @@ -300,6 +304,12 @@ __und_svc: @ restore SPSR and restart the instruction @ ldr r5, [sp, #S_PSR] @ Get SVC cpsr +#ifdef CONFIG_TRACE_IRQFLAGS + tst r5, #PSR_I_BIT + bleq trace_hardirqs_on + tst r5, #PSR_I_BIT + blne trace_hardirqs_off +#endif svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__und_svc) -- cgit v1.2.3-18-g5258 From bc089602d206b2abc2d2e8e5324d90342cc0447b Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 18:28:19 +0100 Subject: ARM: entry: instrument usr exception handlers with irqsoff tracing As we no longer re-enable interrupts in these exception handlers, add the irqsoff tracing calls to them so that the kernel tracks the state more accurately. Note that these calls are conditional on IRQSOFF_TRACER: kernel ----------> user ---------> kernel ^ irqs enabled ^ irqs disabled No kernel code can run on the local CPU until we've re-entered the kernel through one of the exception handlers - and userspace can not take any locks etc. So, the kernel doesn't care about the IRQ mask state while userspace is running unless we're doing IRQ off latency tracing. So, we can (and do) avoid the overhead of updating the IRQ mask state on every kernel->user and user->kernel transition. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 920dd3d0795..f863ee79093 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -421,6 +421,11 @@ ENDPROC(__pabt_svc) .align 5 __dabt_usr: usr_entry + +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif + kuser_cmpxchg_check dabt_helper @@ -433,12 +438,12 @@ ENDPROC(__dabt_usr) .align 5 __irq_usr: usr_entry - kuser_cmpxchg_check #ifdef CONFIG_IRQSOFF_TRACER bl trace_hardirqs_off #endif + kuser_cmpxchg_check irq_handler get_thread_info tsk mov why, #0 @@ -451,6 +456,11 @@ ENDPROC(__irq_usr) .align 5 __und_usr: usr_entry + +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif + mov r2, r4 mov r3, r5 @@ -669,6 +679,11 @@ ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: usr_entry + +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif + pabt_helper mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler -- cgit v1.2.3-18-g5258 From f2741b78b607576f0c256604cb3d9256b3428a32 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 25 Jun 2011 17:35:19 +0100 Subject: ARM: entry: consolidate trace_hardirqs_off into (svc|usr)_entry macros All handlers now call trace_hardirqs_off, so move this common code into the (svc|usr)_entry assembler macros. 
Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 47 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 39 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index f863ee79093..a5b2c40d44e 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -179,16 +179,15 @@ ENDPROC(__und_invalid) @ r6 - orig_r0 (see pt_regs definition in ptrace.h) @ stmia r7, {r2 - r6} - .endm - - .align 5 -__dabt_svc: - svc_entry #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif + .endm + .align 5 +__dabt_svc: + svc_entry dabt_helper @ @@ -219,11 +218,6 @@ ENDPROC(__dabt_svc) .align 5 __irq_svc: svc_entry - -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - irq_handler #ifdef CONFIG_PREEMPT @@ -267,11 +261,6 @@ __und_svc: #else svc_entry #endif - -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - @ @ call emulation code, which returns using r9 if it has emulated @ the instruction, or the more conventional lr if we are to treat @@ -317,11 +306,6 @@ ENDPROC(__und_svc) .align 5 __pabt_svc: svc_entry - -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - pabt_helper mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler @@ -401,6 +385,10 @@ ENDPROC(__pabt_svc) @ Clear FP to mark the first stack frame @ zero_fp + +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif .endm .macro kuser_cmpxchg_check @@ -421,11 +409,6 @@ ENDPROC(__pabt_svc) .align 5 __dabt_usr: usr_entry - -#ifdef CONFIG_IRQSOFF_TRACER - bl trace_hardirqs_off -#endif - kuser_cmpxchg_check dabt_helper @@ -438,11 +421,6 @@ ENDPROC(__dabt_usr) .align 5 __irq_usr: usr_entry - -#ifdef CONFIG_IRQSOFF_TRACER - bl trace_hardirqs_off -#endif - kuser_cmpxchg_check irq_handler get_thread_info tsk @@ -457,10 +435,6 @@ ENDPROC(__irq_usr) __und_usr: usr_entry -#ifdef CONFIG_IRQSOFF_TRACER - bl trace_hardirqs_off -#endif - mov r2, r4 mov r3, r5 @@ -679,11 +653,6 @@ ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: usr_entry - -#ifdef CONFIG_IRQSOFF_TRACER - bl trace_hardirqs_off -#endif - pabt_helper mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler -- cgit v1.2.3-18-g5258 From d9600c99c549732a501cb727157800623a06175d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 10:34:02 +0100 Subject: ARM: entry: re-allocate registers in irq entry assembly macros This avoids the irq entry assembly corrupting r5, thereby allowing it to be preserved through to the svc exit code. Signed-off-by: Russell King --- arch/arm/include/asm/entry-macro-multi.S | 14 +++++++------- arch/arm/kernel/entry-armv.S | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 2da8547de6d..2f1e2098dfe 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -4,8 +4,8 @@ * Interrupt handling. 
Preserves r7, r8, r9 */ .macro arch_irq_handler_default - get_irqnr_preamble r5, lr -1: get_irqnr_and_base r0, r6, r5, lr + get_irqnr_preamble r6, lr +1: get_irqnr_and_base r0, r2, r6, lr movne r1, sp @ @ routine called with r0 = irq number, r1 = struct pt_regs * @@ -17,17 +17,17 @@ /* * XXX * - * this macro assumes that irqstat (r6) and base (r5) are + * this macro assumes that irqstat (r2) and base (r6) are * preserved from get_irqnr_and_base above */ - ALT_SMP(test_for_ipi r0, r6, r5, lr) + ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp adrne lr, BSYM(1b) bne do_IPI #ifdef CONFIG_LOCAL_TIMERS - test_for_ltirq r0, r6, r5, lr + test_for_ltirq r0, r2, r6, lr movne r0, sp adrne lr, BSYM(1b) bne do_local_timer @@ -40,7 +40,7 @@ .align 5 .global \symbol_name \symbol_name: - mov r4, lr + mov r8, lr arch_irq_handler_default - mov pc, r4 + mov pc, r8 .endm diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index a5b2c40d44e..b17e57949d3 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -29,16 +29,16 @@ #include /* - * Interrupt handling. Preserves r7, r8, r9 + * Interrupt handling. */ .macro irq_handler #ifdef CONFIG_MULTI_IRQ_HANDLER - ldr r5, =handle_arch_irq + ldr r1, =handle_arch_irq mov r0, sp - ldr r5, [r5] + ldr r1, [r1] adr lr, BSYM(9997f) - teq r5, #0 - movne pc, r5 + teq r1, #0 + movne pc, r1 #endif arch_irq_handler_default 9997: -- cgit v1.2.3-18-g5258 From 8dfe7ac96fedd4f5219879f63a8a546a33609daf Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 12:37:35 +0100 Subject: ARM: entry: prefetch abort: tail-call the main prefetch abort handler Tail-call the main C prefetch abort handler code from the per-CPU helper code. Also note that the helper function becomes ABI compliant in terms of the registers preserved. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 8 +++----- arch/arm/mm/pabort-legacy.S | 9 +++++---- arch/arm/mm/pabort-v6.S | 9 +++++---- arch/arm/mm/pabort-v7.S | 11 ++++++----- 4 files changed, 19 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index b17e57949d3..af2fba7a4ca 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -45,7 +45,7 @@ .endm .macro pabt_helper - @ PABORT handler takes fault address in r4 + @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 #ifdef MULTI_PABORT ldr ip, .LCprocfns mov lr, pc @@ -306,9 +306,8 @@ ENDPROC(__und_svc) .align 5 __pabt_svc: svc_entry - pabt_helper mov r2, sp @ regs - bl do_PrefetchAbort @ call abort handler + pabt_helper @ @ IRQs off again before pulling preserved data off the stack @@ -653,9 +652,8 @@ ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: usr_entry - pabt_helper mov r2, sp @ regs - bl do_PrefetchAbort @ call abort handler + pabt_helper UNWIND(.fnend ) /* fall through */ /* diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 8a5d8aaf2d5..8bbff025269 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S @@ -4,10 +4,11 @@ /* * Function: legacy_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = Simulated IFSR with section translation fault status + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. 
*/ @@ -16,5 +17,5 @@ ENTRY(legacy_pabort) mov r0, r4 mov r1, #5 - mov pc, lr + b do_PrefetchAbort ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index eaac1cb7c4c..9627646ce78 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S @@ -4,10 +4,11 @@ /* * Function: v6_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(v6_pabort) mov r0, r4 mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index b515e0b059b..875761f44f3 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S @@ -2,12 +2,13 @@ #include /* - * Function: v6_pabort + * Function: v7_pabort * - * Params : r4 = address of aborted instruction + * Params : r2 = pt_regs + * : r4 = address of aborted instruction + * : r5 = psr for parent context * - * Returns : r0 = address of abort - * : r1 = IFSR + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current prefetch abort. */ @@ -16,5 +17,5 @@ ENTRY(v7_pabort) mrc p15, 0, r0, c6, c0, 2 @ get IFAR mrc p15, 0, r1, c5, c0, 1 @ get IFSR - mov pc, lr + b do_PrefetchAbort ENDPROC(v7_pabort) -- cgit v1.2.3-18-g5258 From 3e287bec6fde088bff05ee7f998f53e8ac75b922 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 14:35:07 +0100 Subject: ARM: entry: data abort: arrange for CPU abort helpers to take pc/psr in r4/r5 Re-jig the CPU abort helpers to take the PC/PSR in r4/r5 rather than r2/r3. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 6 ++---- arch/arm/mm/abort-ev4.S | 8 +++----- arch/arm/mm/abort-ev4t.S | 8 ++++---- arch/arm/mm/abort-ev5t.S | 8 ++++---- arch/arm/mm/abort-ev5tj.S | 12 +++++------- arch/arm/mm/abort-ev6.S | 12 +++++------- arch/arm/mm/abort-ev7.S | 4 ++-- arch/arm/mm/abort-lv4t.S | 12 ++++++------ arch/arm/mm/abort-nommu.S | 4 ++-- arch/arm/mm/proc-arm6_7.S | 10 +++++----- 10 files changed, 38 insertions(+), 46 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index af2fba7a4ca..85298c09325 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -56,14 +56,12 @@ .endm .macro dabt_helper - mov r2, r4 - mov r3, r5 @ @ Call the processor-specific abort handler: @ - @ r2 - aborted context pc - @ r3 - aborted context cpsr + @ r4 - aborted context pc + @ r5 - aborted context psr @ @ The abort handler must return the aborted address in r0, and @ the fault status register in r1. r9 must be preserved. diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e87ba..beb112bdc04 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -3,8 +3,8 @@ /* * Function: v4_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -21,10 +21,8 @@ ENTRY(v4_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r3, [r2] @ read aborted ARM instruction + ldr r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? 
orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index 9910123079c..eaa4ac02395 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -4,8 +4,8 @@ /* * Function: v4t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -22,8 +22,8 @@ ENTRY(v4t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 800e8d42d39..97eee7c4801 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -4,8 +4,8 @@ /* * Function: v5t_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -22,8 +22,8 @@ ENTRY(v5t_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ check write diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bcb58d2fc11..9a365cf1936 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -4,8 +4,8 @@ /* * Function: v5tj_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -23,13 +23,11 @@ ENTRY(v5tj_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR - tst r3, #PSR_J_BIT @ Java? + tst r5, #PSR_J_BIT @ Java? movne pc, lr - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction do_ldrd_abort tmp=r2, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index ef526e702a5..52db4a3fc5f 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -4,8 +4,8 @@ /* * Function: v6_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -33,10 +33,10 @@ ENTRY(v6_early_abort) * The test below covers all the write situations, including Java bytecodes */ bic r1, r1, #1 << 11 @ clear bit 11 of FSR - tst r3, #PSR_J_BIT @ Java? + tst r5, #PSR_J_BIT @ Java? 
movne pc, lr - do_thumb_abort fsr=r1, pc=r2, psr=r3, tmp=r3 - ldreq r3, [r2] @ read aborted ARM instruction + do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 + ldreq r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif @@ -44,5 +44,3 @@ ENTRY(v6_early_abort) tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr - - diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b157d3b..6cb51431a85 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -3,8 +3,8 @@ /* * Function: v7_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e25ea..fea7514225a 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -3,8 +3,8 @@ /* * Function: v4t_late_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = address of abort * : r1 = FSR, bit 11 = write @@ -18,7 +18,7 @@ * picture. Unfortunately, this does happen. We live with it. */ ENTRY(v4t_late_abort) - tst r3, #PSR_T_BIT @ check for thumb mode + tst r5, #PSR_T_BIT @ check for thumb mode #ifdef CONFIG_CPU_CP15_MMU mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR @@ -28,7 +28,7 @@ ENTRY(v4t_late_abort) mov r1, #0 #endif bne .data_thumb_abort - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 @@ -52,7 +52,7 @@ ENTRY(v4t_late_abort) /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 mov r2, sp bl baddataabort @@ -159,7 +159,7 @@ ENTRY(v4t_late_abort) b .data_unknown @ F: MUL? .data_thumb_abort: - ldrh r8, [r2] @ read instruction + ldrh r8, [r4] @ read instruction tst r8, #1 << 11 @ L = 1 -> write? orreq r1, r1, #1 << 8 @ yes and r7, r8, #15 << 12 diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580945b..9eaef6f846c 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S @@ -3,8 +3,8 @@ /* * Function: nommu_early_abort * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Returns : r0 = 0 (abort address) * : r1 = 0 (FSR) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4ce3f..e7be700db08 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -29,8 +29,8 @@ ENTRY(cpu_arm7_dcache_clean_area) /* * Function: arm6_7_data_abort () * - * Params : r2 = address of aborted instruction - * : sp = pointer to registers + * Params : r4 = aborted context pc + * : r5 = aborted context psr * * Purpose : obtain information about current aborted instruction * @@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area) ENTRY(cpu_arm7_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. 
and r7, r8, #15 << 24 @@ -65,7 +65,7 @@ ENTRY(cpu_arm7_data_abort) /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable - mov r0, r2 + mov r0, r4 mov r1, r8 mov r2, sp bl baddataabort @@ -74,7 +74,7 @@ ENTRY(cpu_arm7_data_abort) ENTRY(cpu_arm6_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r2] @ read arm instruction + ldr r8, [r4] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #14 << 24 -- cgit v1.2.3-18-g5258 From 0d147db0c127c561f8f9ead9f3c1ec38f89f1040 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 14:42:02 +0100 Subject: ARM: entry: data abort: avoid using r2 in abort helpers This allows us to pass the pt_regs pointer in to these functions ready for tail-calling the abort handler. Signed-off-by: Russell King --- arch/arm/mm/abort-ev5t.S | 2 +- arch/arm/mm/abort-ev5tj.S | 2 +- arch/arm/mm/abort-ev6.S | 2 +- arch/arm/mm/abort-ev7.S | 8 ++++---- arch/arm/mm/abort-lv4t.S | 34 +++++++++++++++++----------------- arch/arm/mm/proc-arm6_7.S | 18 +++++++++--------- 6 files changed, 33 insertions(+), 33 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 97eee7c4801..751391a5de5 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -25,7 +25,7 @@ ENTRY(v5t_early_abort) do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction bic r1, r1, #1 << 11 @ clear bits 11 of FSR - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 mov pc, lr diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index 9a365cf1936..ccfbc937054 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -27,7 +27,7 @@ ENTRY(v5tj_early_abort) movne pc, lr do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. mov pc, lr diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 52db4a3fc5f..b64d886c0be 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -40,7 +40,7 @@ ENTRY(v6_early_abort) #ifdef CONFIG_CPU_ENDIAN_BE8 reveq r3, r3 #endif - do_ldrd_abort tmp=r2, insn=r3 + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. 
mov pc, lr diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 6cb51431a85..6f98b3a17ac 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -41,13 +41,13 @@ ENTRY(v7_early_abort) mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR isb - mrc p15, 0, r2, c7, c4, 0 @ Read the PAR - and r3, r2, #0x7b @ On translation fault + mrc p15, 0, ip, c7, c4, 0 @ Read the PAR + and r3, ip, #0x7b @ On translation fault cmp r3, #0x0b movne pc, lr bic r1, r1, #0xf @ Fix up FSR FS[5:0] - and r2, r2, #0x7e - orr r1, r1, r2, LSR #1 + and ip, ip, #0x7e + orr r1, r1, ip, LSR #1 #endif mov pc, lr diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index fea7514225a..d032b1f2067 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -64,12 +64,12 @@ ENTRY(v4t_late_abort) mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. @@ -103,13 +103,13 @@ ENTRY(v4t_late_abort) tst r8, #1 << 21 @ check writeback bit moveq pc, lr @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset + movs r9, r8, lsl #20 @ Get offset moveq pc, lr @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement + subne r7, r7, r9, lsr #20 @ Undo increment + addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' mov pc, lr @@ -194,11 +194,11 @@ ENTRY(v4t_late_abort) tst r8, #1 << 10 beq .data_unknown and r6, r8, #0x55 @ hweight8(r8) + R bit - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer @@ -211,11 +211,11 @@ ENTRY(v4t_late_abort) .data_thumb_ldmstm: and r6, r8, #0x55 @ hweight8(r8) - and r2, r8, #0xaa - add r6, r6, r2, lsr #1 - and r2, r6, #0xcc + and r9, r8, #0xaa + add r6, r6, r9, lsr #1 + and r9, r6, #0xcc and r6, r6, #0x33 - add r6, r6, r2, lsr #2 + add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r5, r8, #7 << 8 ldr r7, [sp, r5, lsr #6] diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index e7be700db08..d4c328ecf3b 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -87,12 +87,12 @@ ENTRY(cpu_arm6_data_abort) mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 - and r2, r8, r7, lsl #1 - add r6, r6, r2, lsr #1 - and r2, r8, r7, lsl #2 - add r6, r6, r2, lsr #2 - and r2, r8, r7, lsl #3 - add r6, r6, r2, lsr #3 + and r9, r8, r7, lsl #1 + add r6, r6, r9, lsr #1 + and r9, r8, r7, lsl #2 + add r6, r6, r9, lsr #2 + and r9, r8, r7, lsl #3 + add r6, r6, r9, lsr #3 add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
@@ -117,13 +117,13 @@ ENTRY(cpu_arm6_data_abort) tst r8, #1 << 21 @ check writeback bit moveq pc, lr @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r2, r8, lsl #20 @ Get offset + movs r9, r8, lsl #20 @ Get offset moveq pc, lr @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r2, lsr #20 @ Undo increment - addeq r7, r7, r2, lsr #20 @ Undo decrement + subne r7, r7, r9, lsr #20 @ Undo increment + addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' mov pc, lr -- cgit v1.2.3-18-g5258 From da7404725781bc7c736e10cae5521e5604e222a5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 16:01:26 +0100 Subject: ARM: entry: data abort: tail-call the main data abort handler Tail-call the main C data abort handler code from the per-CPU helper code. Update the comments in the code wrt the new calling and return register state. Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 14 ++++---------- arch/arm/mm/abort-ev4.S | 11 ++++------- arch/arm/mm/abort-ev4t.S | 11 ++++------- arch/arm/mm/abort-ev5t.S | 11 ++++------- arch/arm/mm/abort-ev5tj.S | 13 +++++-------- arch/arm/mm/abort-ev6.S | 13 +++++-------- arch/arm/mm/abort-ev7.S | 15 ++++++--------- arch/arm/mm/abort-lv4t.S | 43 +++++++++++++++++++++---------------------- arch/arm/mm/abort-macro.S | 4 ++-- arch/arm/mm/abort-nommu.S | 8 ++++---- arch/arm/mm/proc-arm6_7.S | 29 ++++++++++++++--------------- 11 files changed, 73 insertions(+), 99 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 85298c09325..bbdd443b805 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -60,6 +60,7 @@ @ @ Call the processor-specific abort handler: @ + @ r2 - pt_regs @ r4 - aborted context pc @ r5 - aborted context psr @ @@ -186,13 +187,8 @@ ENDPROC(__und_invalid) .align 5 __dabt_svc: svc_entry - dabt_helper - - @ - @ call main handler - @ mov r2, sp - bl do_DataAbort + dabt_helper @ @ IRQs off again before pulling preserved data off the stack @@ -407,11 +403,9 @@ ENDPROC(__pabt_svc) __dabt_usr: usr_entry kuser_cmpxchg_check - dabt_helper - mov r2, sp - adr lr, BSYM(ret_from_exception) - b do_DataAbort + dabt_helper + b ret_from_exception UNWIND(.fnend ) ENDPROC(__dabt_usr) diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index beb112bdc04..54473cd4aba 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S @@ -3,14 +3,11 @@ /* * Function: v4_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -25,4 +22,4 @@ ENTRY(v4_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ L = 1 -> write? orreq r1, r1, #1 << 11 @ yes. 
- mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index eaa4ac02395..9da704e7b86 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S @@ -4,14 +4,11 @@ /* * Function: v4t_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -27,4 +24,4 @@ ENTRY(v4t_early_abort) bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 751391a5de5..a0908d4653a 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S @@ -4,14 +4,11 @@ /* * Function: v5t_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -28,4 +25,4 @@ ENTRY(v5t_early_abort) do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ check write orreq r1, r1, #1 << 11 - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index ccfbc937054..4006b7a6126 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S @@ -4,14 +4,11 @@ /* * Function: v5tj_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -24,10 +21,10 @@ ENTRY(v5tj_early_abort) mrc p15, 0, r0, c6, c0, 0 @ get FAR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR tst r5, #PSR_J_BIT @ Java? - movne pc, lr + bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index b64d886c0be..ff1f7cc11f8 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -4,14 +4,11 @@ /* * Function: v6_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -34,7 +31,7 @@ ENTRY(v6_early_abort) */ bic r1, r1, #1 << 11 @ clear bit 11 of FSR tst r5, #PSR_J_BIT @ Java? 
- movne pc, lr + bne do_DataAbort do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 ldreq r3, [r4] @ read aborted ARM instruction #ifdef CONFIG_CPU_ENDIAN_BE8 @@ -43,4 +40,4 @@ ENTRY(v6_early_abort) do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 6f98b3a17ac..703375277ba 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -3,14 +3,11 @@ /* * Function: v7_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4 - r11, r13 preserved * * Purpose : obtain information about current aborted instruction. */ @@ -37,18 +34,18 @@ ENTRY(v7_early_abort) ldr r3, =0x40d @ On permission fault and r3, r1, r3 cmp r3, #0x0d - movne pc, lr + bne do_DataAbort mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR isb mrc p15, 0, ip, c7, c4, 0 @ Read the PAR and r3, ip, #0x7b @ On translation fault cmp r3, #0x0b - movne pc, lr + bne do_DataAbort bic r1, r1, #0xf @ Fix up FSR FS[5:0] and ip, ip, #0x7e orr r1, r1, ip, LSR #1 #endif - mov pc, lr + b do_DataAbort ENDPROC(v7_early_abort) diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index d032b1f2067..d432f31cdab 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -3,7 +3,8 @@ /* * Function: v4t_late_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * * Returns : r0 = address of abort @@ -47,20 +48,18 @@ ENTRY(v4t_late_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 @@ -79,11 +78,11 @@ ENTRY(v4t_late_abort) subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit - moveq pc, lr @ No writeback -> no fixup + beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: and r5, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) @@ -97,25 +96,25 @@ ENTRY(v4t_late_abort) subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r9, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup + beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo 
increment addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' @@ -172,10 +171,10 @@ ENTRY(v4t_late_abort) /* 3 */ b .data_unknown /* 4 */ b .data_unknown /* 5 */ b .data_thumb_reg -/* 6 */ mov pc, lr -/* 7 */ mov pc, lr -/* 8 */ mov pc, lr -/* 9 */ mov pc, lr +/* 6 */ b do_DataAbort +/* 7 */ b do_DataAbort +/* 8 */ b do_DataAbort +/* 9 */ b do_DataAbort /* A */ b .data_unknown /* B */ b .data_thumb_pushpop /* C */ b .data_thumb_ldmstm @@ -185,10 +184,10 @@ ENTRY(v4t_late_abort) .data_thumb_reg: tst r8, #1 << 9 - moveq pc, lr + beq do_DataAbort tst r8, #1 << 10 @ If 'S' (signed) bit is set movne r1, #0 @ it must be a load instr - mov pc, lr + b do_DataAbort .data_thumb_pushpop: tst r8, #1 << 10 @@ -207,7 +206,7 @@ ENTRY(v4t_late_abort) addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP str r7, [sp, #13 << 2] - mov pc, lr + b do_DataAbort .data_thumb_ldmstm: and r6, r8, #0x55 @ hweight8(r8) @@ -222,4 +221,4 @@ ENTRY(v4t_late_abort) and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement str r7, [sp, r5, lsr #6] - mov pc, lr + b do_DataAbort diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index af97a10bc5e..52162d59407 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -18,7 +18,7 @@ orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes tst \tmp, #1 << 11 @ L = 0 -> write orreq \psr, \psr, #1 << 11 @ yes. - mov pc, lr + b do_DataAbort not_thumb: .endm @@ -34,7 +34,7 @@ not_thumb: bne not_ldrd and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 cmp \tmp, #0x000000d0 - moveq pc, lr + beq do_DataAbort not_ldrd: .endm diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 9eaef6f846c..119cb479c2a 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S @@ -3,11 +3,11 @@ /* * Function: nommu_early_abort * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = 0 (abort address) - * : r1 = 0 (FSR) + * Returns : r4 - r11, r13 preserved * * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. * Just fill zero into the registers. 
@@ -16,5 +16,5 @@ ENTRY(nommu_early_abort) mov r0, #0 @ clear r0, r1 (no FSR/FAR) mov r1, #0 - mov pc, lr + b do_DataAbort ENDPROC(nommu_early_abort) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index d4c328ecf3b..d755d5b8389 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -29,7 +29,8 @@ ENTRY(cpu_arm7_dcache_clean_area) /* * Function: arm6_7_data_abort () * - * Params : r4 = aborted context pc + * Params : r2 = pt_regs + * : r4 = aborted context pc * : r5 = aborted context psr * * Purpose : obtain information about current aborted instruction @@ -49,7 +50,7 @@ ENTRY(cpu_arm7_data_abort) nop /* 0 */ b .data_unknown -/* 1 */ mov pc, lr @ swp +/* 1 */ b do_DataAbort @ swp /* 2 */ b .data_unknown /* 3 */ b .data_unknown /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m @@ -60,16 +61,14 @@ ENTRY(cpu_arm7_data_abort) /* 9 */ b .data_arm_ldmstm @ ldm*b rn, /* a */ b .data_unknown /* b */ b .data_unknown -/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m -/* d */ mov pc, lr @ ldc rd, [rn, #m] +/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m +/* d */ b do_DataAbort @ ldc rd, [rn, #m] /* e */ b .data_unknown /* f */ .data_unknown: @ Part of jumptable mov r0, r4 mov r1, r8 - mov r2, sp - bl baddataabort - b ret_from_exception + b baddataabort ENTRY(cpu_arm6_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR @@ -79,11 +78,11 @@ ENTRY(cpu_arm6_data_abort) orreq r1, r1, #1 << 11 @ yes. and r7, r8, #14 << 24 teq r7, #8 << 24 @ was it ldm/stm - movne pc, lr + bne do_DataAbort .data_arm_ldmstm: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup mov r7, #0x11 orr r7, r7, #0x1100 and r6, r8, r7 @@ -102,7 +101,7 @@ ENTRY(cpu_arm6_data_abort) subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction @@ -111,25 +110,25 @@ ENTRY(cpu_arm6_data_abort) subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrpreconst: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: movs r9, r8, lsl #20 @ Get offset - moveq pc, lr @ zero -> no fixup + beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement str r7, [sp, r5, lsr #14] @ Put register 'Rn' - mov pc, lr + b do_DataAbort .data_arm_lateldrprereg: tst r8, #1 << 21 @ check writeback bit - moveq pc, lr @ no writeback -> no fixup + beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' -- cgit v1.2.3-18-g5258 From e22c12f9146d50ee6b0cf97db46b3310409f64e6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 09:52:54 +0100 Subject: ARM: entry: data abort: use r2 as base of pt_regs rather than stack Now that we pass r2 into these helper functions as the pointer to pt_regs, use r2 as the base of the registers on the stack rather than using the stack pointer directly. 
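As a rough C rendering of what the writeback fixup now does (purely illustrative; the structure and names below are assumptions, not the kernel's definitions), the helper indexes the saved register file through the pt_regs pointer carried in r2:

#include <stdint.h>

/* Sketch only: 'regs' stands in for r2, 'rn' is the base register number
 * decoded from the aborted instruction, 'offset' the immediate and 'u_bit'
 * the U (up/down) bit. The real layout lives in struct pt_regs.
 */
struct saved_regs_sketch {
	uint32_t uregs[18];			/* assumed r0-r15, cpsr, orig_r0 */
};

static void undo_writeback(struct saved_regs_sketch *regs, unsigned int rn,
			   uint32_t offset, int u_bit)
{
	uint32_t val = regs->uregs[rn];		/* ldr r7, [r2, r5, lsr #14] */

	if (u_bit)
		val -= offset;			/* subne: undo the increment */
	else
		val += offset;			/* addeq: undo the decrement */

	regs->uregs[rn] = val;			/* str r7, [r2, r5, lsr #14] */
}
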
Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 24 ++++++++++++------------ arch/arm/mm/proc-arm6_7.S | 14 +++++++------- 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index d432f31cdab..921aaab0c8c 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -73,11 +73,11 @@ ENTRY(v4t_late_abort) add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrhpre: @@ -88,14 +88,14 @@ ENTRY(v4t_late_abort) tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble orrne r6, r5, r6, lsr #4 @ combine nibbles } else - ldreq r6, [sp, r5, lsl #2] @ { load Rm value } + ldreq r6, [r2, r5, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -105,11 +105,11 @@ ENTRY(v4t_late_abort) movs r9, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -117,7 +117,7 @@ ENTRY(v4t_late_abort) beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' mov r5, r8, lsr #7 @ get shift count ands r5, r5, #31 and r7, r8, #0x70 @ get shift type @@ -201,11 +201,11 @@ ENTRY(v4t_late_abort) movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) adc r6, r6, r6, lsr #4 @ high + low nibble + R bit and r6, r6, #15 @ number of regs to transfer - ldr r7, [sp, #13 << 2] + ldr r7, [r2, #13 << 2] tst r8, #1 << 11 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH subne r7, r7, r6, lsl #2 @ decrement SP if POP - str r7, [sp, #13 << 2] + str r7, [r2, #13 << 2] b do_DataAbort .data_thumb_ldmstm: @@ -217,8 +217,8 @@ ENTRY(v4t_late_abort) add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 and r5, r8, #7 << 8 - ldr r7, [sp, r5, lsr #6] + ldr r7, [r2, r5, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement - str r7, [sp, r5, lsr #6] + str r7, [r2, r5, lsr #6] b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index d755d5b8389..141906eae26 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -96,20 +96,20 @@ ENTRY(cpu_arm6_data_abort) add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_apply_r6_and_rn: and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -119,11 +119,11 @@ ENTRY(cpu_arm6_data_abort) movs r9, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' + ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r9, lsr #20 @ Undo increment addeq r7, r7, r9, lsr #20 @ Undo decrement - str r7, [sp, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -131,7 +131,7 @@ ENTRY(cpu_arm6_data_abort) beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction - ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' + ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' mov r5, r8, lsr #7 @ get shift count ands r5, r5, #31 and r7, r8, #0x70 @ get shift type -- cgit v1.2.3-18-g5258 From 108f6af0a82cf3b61f3ac6728e2241805a935b64 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 12:23:11 +0100 Subject: ARM: entry: data abort: always use r6 for offset Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 6 +++--- arch/arm/mm/proc-arm6_7.S | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 921aaab0c8c..54b6d279371 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -102,13 +102,13 @@ ENTRY(v4t_late_abort) tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r9, r8, lsl #20 @ Get offset + movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r9, lsr #20 @ Undo increment - addeq r7, r7, r9, lsr #20 @ Undo decrement + subne r7, r7, r6, lsr #20 @ Undo increment + addeq r7, r7, r6, lsr #20 @ Undo decrement str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 141906eae26..4d963114c66 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -116,13 +116,13 @@ ENTRY(cpu_arm6_data_abort) tst r8, #1 << 21 @ check writeback bit beq do_DataAbort @ no writeback -> no fixup .data_arm_lateldrpostconst: - movs r9, r8, lsl #20 @ Get offset + movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup and r5, r8, #15 << 16 @ Extract 'n' from instruction ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit - subne r7, r7, r9, lsr #20 @ Undo increment - addeq r7, r7, r9, lsr #20 @ Undo decrement + subne r7, r7, r6, lsr #20 @ Undo increment + addeq 
r7, r7, r6, lsr #20 @ Undo decrement str r7, [r2, r5, lsr #14] @ Put register 'Rn' b do_DataAbort -- cgit v1.2.3-18-g5258 From 40f0b90a2f16f433f9afbfef4b7c312efb54e933 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Jun 2011 12:27:47 +0100 Subject: ARM: entry: data abort: ensure r5 is preserved by abort functions Signed-off-by: Russell King --- arch/arm/mm/abort-lv4t.S | 48 ++++++++++++++++++++++------------------------- arch/arm/mm/proc-arm6_7.S | 33 ++++++++++++++++---------------- 2 files changed, 38 insertions(+), 43 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 54b6d279371..f3982580c27 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S @@ -7,11 +7,7 @@ * : r4 = aborted context pc * : r5 = aborted context psr * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers + * Returns : r4-r5, r10-r11, r13 preserved * * Purpose : obtain information about current aborted instruction. * Note: we read user space. This means we might cause a data @@ -72,30 +68,30 @@ ENTRY(v4t_late_abort) add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrhpre: tst r8, #1 << 21 @ Check writeback bit beq do_DataAbort @ No writeback -> no fixup .data_arm_lateldrhpost: - and r5, r8, #0x00f @ get Rm / low nibble of immediate value + and r9, r8, #0x00f @ get Rm / low nibble of immediate value tst r8, #1 << 22 @ if (immediate offset) andne r6, r8, #0xf00 @ { immediate high nibble - orrne r6, r5, r6, lsr #4 @ combine nibbles } else - ldreq r6, [r2, r5, lsl #2] @ { load Rm value } + orrne r6, r9, r6, lsr #4 @ combine nibbles } else + ldreq r6, [r2, r9, lsl #2] @ { load Rm value } .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -104,12 +100,12 @@ ENTRY(v4t_late_abort) .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -118,14 +114,14 @@ ENTRY(v4t_late_abort) .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + mov r9, r8, 
lsr #7 @ get shift count + ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -133,7 +129,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -141,7 +137,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -149,7 +145,7 @@ ENTRY(v4t_late_abort) nop b .data_unknown @ B: MUL? nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn @@ -216,9 +212,9 @@ ENTRY(v4t_late_abort) and r6, r6, #0x33 add r6, r6, r9, lsr #2 add r6, r6, r6, lsr #4 - and r5, r8, #7 << 8 - ldr r7, [r2, r5, lsr #6] + and r9, r8, #7 << 8 + ldr r7, [r2, r9, lsr #6] and r6, r6, #15 @ number of regs to transfer sub r7, r7, r6, lsl #2 @ always decrement - str r7, [r2, r5, lsr #6] + str r7, [r2, r9, lsr #6] b do_DataAbort diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 4d963114c66..50e3543d03b 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -35,8 +35,7 @@ ENTRY(cpu_arm7_dcache_clean_area) * * Purpose : obtain information about current aborted instruction * - * Returns : r0 = address of abort - * : r1 = FSR + * Returns : r4-r5, r10-r11, r13 preserved */ ENTRY(cpu_arm7_data_abort) @@ -95,21 +94,21 @@ ENTRY(cpu_arm6_data_abort) add r6, r6, r6, lsr #8 add r6, r6, r6, lsr #4 and r6, r6, #15 @ r6 = no. of registers to transfer. 
- and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsl #2 @ Undo increment addeq r7, r7, r6, lsl #2 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_apply_r6_and_rn: - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6 @ Undo incrmenet addeq r7, r7, r6 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrpreconst: @@ -118,12 +117,12 @@ ENTRY(cpu_arm6_data_abort) .data_arm_lateldrpostconst: movs r6, r8, lsl #20 @ Get offset beq do_DataAbort @ zero -> no fixup - and r5, r8, #15 << 16 @ Extract 'n' from instruction - ldr r7, [r2, r5, lsr #14] @ Get register 'Rn' + and r9, r8, #15 << 16 @ Extract 'n' from instruction + ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' tst r8, #1 << 23 @ Check U bit subne r7, r7, r6, lsr #20 @ Undo increment addeq r7, r7, r6, lsr #20 @ Undo decrement - str r7, [r2, r5, lsr #14] @ Put register 'Rn' + str r7, [r2, r9, lsr #14] @ Put register 'Rn' b do_DataAbort .data_arm_lateldrprereg: @@ -132,14 +131,14 @@ ENTRY(cpu_arm6_data_abort) .data_arm_lateldrpostreg: and r7, r8, #15 @ Extract 'm' from instruction ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' - mov r5, r8, lsr #7 @ get shift count - ands r5, r5, #31 + mov r9, r8, lsr #7 @ get shift count + ands r9, r9, #31 and r7, r8, #0x70 @ get shift type orreq r7, r7, #8 @ shift count = 0 add pc, pc, r7 nop - mov r6, r6, lsl r5 @ 0: LSL #!0 + mov r6, r6, lsl r9 @ 0: LSL #!0 b .data_arm_apply_r6_and_rn b .data_arm_apply_r6_and_rn @ 1: LSL #0 nop @@ -147,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 3: MUL? nop - mov r6, r6, lsr r5 @ 4: LSR #!0 + mov r6, r6, lsr r9 @ 4: LSR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, lsr #32 @ 5: LSR #32 b .data_arm_apply_r6_and_rn @@ -155,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ 7: MUL? nop - mov r6, r6, asr r5 @ 8: ASR #!0 + mov r6, r6, asr r9 @ 8: ASR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, asr #32 @ 9: ASR #32 b .data_arm_apply_r6_and_rn @@ -163,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) nop b .data_unknown @ B: MUL? nop - mov r6, r6, ror r5 @ C: ROR #!0 + mov r6, r6, ror r9 @ C: ROR #!0 b .data_arm_apply_r6_and_rn mov r6, r6, rrx @ D: RRX b .data_arm_apply_r6_and_rn -- cgit v1.2.3-18-g5258 From 30891c90d81133179cc47eb77c30764a3b5dad5c Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 26 Jun 2011 12:47:08 +0100 Subject: ARM: entry: no need to reload the SPSR value from struct pt_regs The SVC IRQ, prefetch and data abort handlers preserve the SPSR value via r5 across the exception. Rather than re-loading it from pt_regs, use the preserved value instead. 
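As a loose C analogy (not the real entry code; everything below is invented for illustration), the PSR is read once into a value that the handler call preserves, so the exit path no longer reloads it from the saved frame:

#include <stdint.h>

struct frame_sketch {
	uint32_t psr;			/* stands in for the S_PSR slot */
};

static void handle_exception(struct frame_sketch *frame)
{
	(void)frame;			/* placeholder for the C abort handler */
}

static uint32_t svc_exit_sketch(struct frame_sketch *frame)
{
	uint32_t spsr = frame->psr;	/* loaded once on entry, kept in r5 */

	handle_exception(frame);	/* r5 survives this call */

	return spsr;			/* exit uses the preserved copy */
}
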
Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index bbdd443b805..fa02a22a4c4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -195,10 +195,6 @@ __dabt_svc: @ disable_irq_notrace - @ - @ restore SPSR and restart the instruction - @ - ldr r5, [sp, #S_PSR] #ifdef CONFIG_TRACE_IRQFLAGS tst r5, #PSR_I_BIT bleq trace_hardirqs_on @@ -223,7 +219,7 @@ __irq_svc: tst r0, #_TIF_NEED_RESCHED blne svc_preempt #endif - ldr r5, [sp, #S_PSR] + #ifdef CONFIG_TRACE_IRQFLAGS @ The parent context IRQs must have been enabled to get here in @ the first place, so there's no point checking the PSR I bit. @@ -308,10 +304,6 @@ __pabt_svc: @ disable_irq_notrace - @ - @ restore SPSR and restart the instruction - @ - ldr r5, [sp, #S_PSR] #ifdef CONFIG_TRACE_IRQFLAGS tst r5, #PSR_I_BIT bleq trace_hardirqs_on -- cgit v1.2.3-18-g5258 From 178783622ce0fd629fad21b33b8f8f56b64c5e45 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 23:24:21 +0100 Subject: ARM: dmabounce: fix map_single() error return value When map_single() is unable to obtain a safe buffer, we must return the dma_addr_t error value, which is ~0 rather than 0. Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index e5681636626..841df7d21c2 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -255,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, if (buf == 0) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); - return 0; + return ~0; } dev_dbg(dev, -- cgit v1.2.3-18-g5258 From 8021a4a048a85906302bd0236f3d125473be65b1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 16:13:58 +0100 Subject: ARM: dma-mapping: define dma_(un)?map_single in terms of dma_(un)?map_page Use dma_map_page()/dma_unmap_page() internals to handle dma_map_single() and dma_unmap_single(). Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 28 ---------------------------- arch/arm/include/asm/dma-mapping.h | 31 +++++++++---------------------- 2 files changed, 9 insertions(+), 50 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 841df7d21c2..8a0588b007e 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -328,34 +328,6 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, * substitute the safe buffer for the unsafe one. * (basically move the buffer from an unsafe area to a safe one) */ -dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction dir) -{ - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, ptr, size, dir); - - BUG_ON(!valid_dma_direction(dir)); - - return map_single(dev, ptr, size, dir); -} -EXPORT_SYMBOL(__dma_map_single); - -/* - * see if a mapped address was really a "safe" buffer and if so, copy - * the data from the safe buffer back to the unsafe buffer and free up - * the safe buffer. 
(basically return things back to the way they - * should be) - */ -void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir) -{ - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, (void *) dma_addr, size, dir); - - unmap_single(dev, dma_addr, size, dir); -} -EXPORT_SYMBOL(__dma_unmap_single); - dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363e..d2903d0c308 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -298,10 +298,6 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); /* * The DMA API, implemented by dmabounce.c. See below for descriptions. */ -extern dma_addr_t __dma_map_single(struct device *, void *, size_t, - enum dma_data_direction); -extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, - enum dma_data_direction); extern dma_addr_t __dma_map_page(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction); extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, @@ -328,13 +324,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, } -static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction dir) -{ - __dma_single_cpu_to_dev(cpu_addr, size, dir); - return virt_to_dma(dev, cpu_addr); -} - static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { @@ -342,12 +331,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, return pfn_to_dma(dev, page_to_pfn(page)) + offset; } -static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); -} - static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { @@ -373,14 +356,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) { + unsigned long offset; + struct page *page; dma_addr_t addr; + BUG_ON(!virt_addr_valid(cpu_addr)); + BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); BUG_ON(!valid_dma_direction(dir)); - addr = __dma_map_single(dev, cpu_addr, size, dir); - debug_dma_map_page(dev, virt_to_page(cpu_addr), - (unsigned long)cpu_addr & ~PAGE_MASK, size, - dir, addr, true); + page = virt_to_page(cpu_addr); + offset = (unsigned long)cpu_addr & ~PAGE_MASK; + addr = __dma_map_page(dev, page, offset, size, dir); + debug_dma_map_page(dev, page, offset, size, dir, addr, true); return addr; } @@ -430,7 +417,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { debug_dma_unmap_page(dev, handle, size, dir, true); - __dma_unmap_single(dev, handle, size, dir); + __dma_unmap_page(dev, handle, size, dir); } /** -- cgit v1.2.3-18-g5258 From 71695dd8b9eacfcda1b548a5b1780d34213ad654 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 19:46:39 +0100 Subject: ARM: dmabounce: avoid needless valid_dma_direction() check This check is done at the DMA API level, so there's no point repeating it here. 
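The layering this relies on, reduced to a sketch (kernel names are used for orientation only and the usual surrounding includes are assumed; this is not the real header): the inline DMA API wrapper rejects bad directions once, so the dmabounce backend can take dir on trust.

/* Sketch: the direction is validated at the API boundary, not in the backend. */
static inline dma_addr_t dma_map_page_sketch(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));	/* API-level argument check */
	return __dma_map_page(dev, page, offset, size, dir); /* backend trusts dir */
}
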
Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 8a0588b007e..3e0fa154858 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -334,8 +334,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page, dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", __func__, page, offset, size, dir); - BUG_ON(!valid_dma_direction(dir)); - if (PageHighMem(page)) { dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " "is not supported\n"); -- cgit v1.2.3-18-g5258 From 23bc9873ba60ee661d8e9f3a6b22fc3bcc4b7015 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 22:28:32 +0100 Subject: ARM: dmabounce: separate out decision to bounce Move the decision to perform DMA bouncing out of map_single() into its own stand-alone function. Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 46 +++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 3e0fa154858..643e1d66067 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -219,36 +219,46 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, return find_safe_buffer(dev->archdata.dmabounce, dma_addr); } -static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction dir) +static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) { - struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - dma_addr_t dma_addr; - int needs_bounce = 0; - - if (device_info) - DO_STATS ( device_info->map_op_count++ ); - - dma_addr = virt_to_dma(dev, ptr); + if (!dev || !dev->archdata.dmabounce) + return 0; if (dev->dma_mask) { - unsigned long mask = *dev->dma_mask; - unsigned long limit; + unsigned long limit, mask = *dev->dma_mask; limit = (mask + 1) & ~mask; if (limit && size > limit) { dev_err(dev, "DMA mapping too big (requested %#x " "mask %#Lx)\n", size, *dev->dma_mask); - return ~0; + return -E2BIG; } - /* - * Figure out if we need to bounce from the DMA mask. - */ - needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; + /* Figure out if we need to bounce from the DMA mask. */ + if ((dma_addr | (dma_addr + size - 1)) & ~mask) + return 1; } - if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { + return dma_needs_bounce(dev, dma_addr, size) ? 1 : 0; +} + +static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction dir) +{ + struct dmabounce_device_info *device_info = dev->archdata.dmabounce; + dma_addr_t dma_addr; + int ret; + + if (device_info) + DO_STATS ( device_info->map_op_count++ ); + + dma_addr = virt_to_dma(dev, ptr); + + ret = needs_bounce(dev, dma_addr, size); + if (ret < 0) + return ~0; + + if (ret > 0) { struct safe_buffer *buf; buf = alloc_safe_buffer(device_info, ptr, size, dir); -- cgit v1.2.3-18-g5258 From dd3641fc3cf6d10b7cf4e266c2f651517779e727 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 22:39:43 +0100 Subject: ARM: dmabounce: move decision for bouncing into __dma_map_page() Move the decision whether to bounce into __dma_map_page(), before the check for high pages. This avoids triggering the high page check for devices which aren't using dmabounce. Fix the unmap path to cope too. 
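Condensed into a sketch, the reworked flow looks roughly like this (debug output is dropped and the final bounce call is shown as assumed; the hunk below is authoritative):

/* Illustrative shape of the new __dma_map_page() decision order. */
dma_addr_t __dma_map_page_sketch(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
	int ret = needs_bounce(dev, dma_addr, size);

	if (ret < 0)
		return ~0;			/* exceeds the DMA mask limit */

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;		/* common case: no bouncing */
	}

	if (PageHighMem(page))
		return ~0;			/* bouncing highmem unsupported */

	/* assumed call shape of the slow path: use a bounce buffer */
	return map_single(dev, page_address(page) + offset, size, dir);
}
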
Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 124 +++++++++++++++++++++----------------------- 1 file changed, 58 insertions(+), 66 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 643e1d66067..6ae292cc43b 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -246,88 +246,58 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) { struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - dma_addr_t dma_addr; - int ret; + struct safe_buffer *buf; if (device_info) DO_STATS ( device_info->map_op_count++ ); - dma_addr = virt_to_dma(dev, ptr); - - ret = needs_bounce(dev, dma_addr, size); - if (ret < 0) + buf = alloc_safe_buffer(device_info, ptr, size, dir); + if (buf == 0) { + dev_err(dev, "%s: unable to map unsafe buffer %p!\n", + __func__, ptr); return ~0; + } - if (ret > 0) { - struct safe_buffer *buf; - - buf = alloc_safe_buffer(device_info, ptr, size, dir); - if (buf == 0) { - dev_err(dev, "%s: unable to map unsafe buffer %p!\n", - __func__, ptr); - return ~0; - } - - dev_dbg(dev, - "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); - - if ((dir == DMA_TO_DEVICE) || - (dir == DMA_BIDIRECTIONAL)) { - dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", - __func__, ptr, buf->safe, size); - memcpy(buf->safe, ptr, size); - } - ptr = buf->safe; + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); - dma_addr = buf->safe_dma_addr; - } else { - /* - * We don't need to sync the DMA buffer since - * it was allocated via the coherent allocators. - */ - __dma_single_cpu_to_dev(ptr, size, dir); + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", + __func__, ptr, buf->safe, size); + memcpy(buf->safe, ptr, size); } - return dma_addr; + return buf->safe_dma_addr; } -static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, +static inline void unmap_single(struct device *dev, struct safe_buffer *buf, size_t size, enum dma_data_direction dir) { - struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap"); - - if (buf) { - BUG_ON(buf->size != size); - BUG_ON(buf->direction != dir); + BUG_ON(buf->size != size); + BUG_ON(buf->direction != dir); - dev_dbg(dev, - "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); - DO_STATS(dev->archdata.dmabounce->bounce_count++); + DO_STATS(dev->archdata.dmabounce->bounce_count++); - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { - void *ptr = buf->ptr; + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + void *ptr = buf->ptr; - dev_dbg(dev, - "%s: copy back safe %p to unsafe %p size %d\n", - __func__, buf->safe, ptr, size); - memcpy(ptr, buf->safe, size); + dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", + __func__, buf->safe, ptr, size); + memcpy(ptr, buf->safe, size); - /* - * Since we may have written to a page cache page, - * we need to ensure that the data will be coherent - * with user mappings. 
- */ - __cpuc_flush_dcache_area(ptr, size); - } - free_safe_buffer(dev->archdata.dmabounce, buf); - } else { - __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); + /* + * Since we may have written to a page cache page, + * we need to ensure that the data will be coherent + * with user mappings. + */ + __cpuc_flush_dcache_area(ptr, size); } + free_safe_buffer(dev->archdata.dmabounce, buf); } /* ************************************************** */ @@ -341,12 +311,25 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { + dma_addr_t dma_addr; + int ret; + dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", __func__, page, offset, size, dir); + dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; + + ret = needs_bounce(dev, dma_addr, size); + if (ret < 0) + return ~0; + + if (ret == 0) { + __dma_page_cpu_to_dev(page, offset, size, dir); + return dma_addr; + } + if (PageHighMem(page)) { - dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " - "is not supported\n"); + dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); return ~0; } @@ -363,10 +346,19 @@ EXPORT_SYMBOL(__dma_map_page); void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir) { + struct safe_buffer *buf; + dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", __func__, (void *) dma_addr, size, dir); - unmap_single(dev, dma_addr, size, dir); + buf = find_safe_buffer_dev(dev, dma_addr, __func__); + if (!buf) { + __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), + dma_addr & ~PAGE_MASK, size, dir); + return; + } + + unmap_single(dev, buf, size, dir); } EXPORT_SYMBOL(__dma_unmap_page); -- cgit v1.2.3-18-g5258 From e2f521e247576c897e1bf0ada801e87c7e2db89f Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 23:53:13 +0100 Subject: ARM: dmabounce: remove useless pr_err We already check that dev != NULL, so this won't be reached. Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 6ae292cc43b..0077c1b7d7f 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -210,10 +210,7 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, if (!dev || !dev->archdata.dmabounce) return NULL; if (dma_mapping_error(dev, dma_addr)) { - if (dev) - dev_err(dev, "Trying to %s invalid mapping\n", where); - else - pr_err("unknown device: Trying to %s invalid mapping\n", where); + dev_err(dev, "Trying to %s invalid mapping\n", where); return NULL; } return find_safe_buffer(dev->archdata.dmabounce, dma_addr); -- cgit v1.2.3-18-g5258 From dfa322fceb15227c07670c415e835d2dfa8a4307 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 23:54:34 +0100 Subject: ARM: dmabounce: check pointer against NULL not 0 Pointers should be checked against NULL rather than 0, otherwise we get sparse warnings. 
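The style point in miniature (illustrative, not kernel code): a bare 0 used where a pointer is expected is what draws the sparse warning, so the test is spelled with NULL or with the logical-not form.

#include <stddef.h>

static int buffer_missing(const void *buf)
{
	/* return buf == 0;   <- this form is what sparse complains about */
	return buf == NULL;	/* preferred; '!buf' reads the same way */
}
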
Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 0077c1b7d7f..b4a8759e0fa 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -249,7 +249,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, DO_STATS ( device_info->map_op_count++ ); buf = alloc_safe_buffer(device_info, ptr, size, dir); - if (buf == 0) { + if (buf == NULL) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); return ~0; -- cgit v1.2.3-18-g5258 From c289b2e0ccff1142908e20398930dc2e14697e74 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 Jul 2011 23:56:17 +0100 Subject: ARM: dmabounce: correct unmap_single dev_dbg DMA addresses should not be casted to void * for printing. Fix that to be consistent with the rest of the file. Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index b4a8759e0fa..4f13505ac93 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -345,8 +345,8 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, { struct safe_buffer *buf; - dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", - __func__, (void *) dma_addr, size, dir); + dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", + __func__, dma_addr, size, dir); buf = find_safe_buffer_dev(dev, dma_addr, __func__); if (!buf) { -- cgit v1.2.3-18-g5258 From 0703ed2a6b260cd743adf49a8281eb064d728832 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 4 Jul 2011 08:32:21 +0100 Subject: ARM: dmabounce: get rid of dma_needs_bounce global function Pass the device type specific needs_bounce function in at dmabounce register time, avoiding the need for a platform specific global function to do this. Signed-off-by: Russell King --- arch/arm/common/dmabounce.c | 8 +++-- arch/arm/common/it8152.c | 17 +++++------ arch/arm/common/sa1111.c | 60 ++++++++++++++++++++------------------ arch/arm/include/asm/dma-mapping.h | 22 ++------------ arch/arm/mach-ixp4xx/common-pci.c | 12 ++++---- 5 files changed, 53 insertions(+), 66 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 4f13505ac93..595ecd290eb 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -79,6 +79,8 @@ struct dmabounce_device_info { struct dmabounce_pool large; rwlock_t lock; + + int (*needs_bounce)(struct device *, dma_addr_t, size_t); }; #ifdef STATS @@ -236,7 +238,7 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) return 1; } - return dma_needs_bounce(dev, dma_addr, size) ? 
1 : 0; + return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); } static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, @@ -430,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, } int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, - unsigned long large_buffer_size) + unsigned long large_buffer_size, + int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) { struct dmabounce_device_info *device_info; int ret; @@ -466,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, device_info->dev = dev; INIT_LIST_HEAD(&device_info->safe_buffers); rwlock_init(&device_info->lock); + device_info->needs_bounce = needs_bounce_fn; #ifdef STATS device_info->total_allocs = 0; diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 7a21927c52e..80b49e1e099 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -243,6 +243,13 @@ static struct resource it8152_mem = { * ITE8152 chip can address up to 64MByte, so all the devices * connected to ITE8152 (PCI and USB) should have limited DMA window */ +static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", + __func__, dma_addr, size); + return dev->bus == &pci_bus_type && + (dma_addr + size - PHYS_OFFSET) >= SZ_64M; +} /* * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all @@ -254,7 +261,7 @@ static int it8152_pci_platform_notify(struct device *dev) if (dev->dma_mask) *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; - dmabounce_register_dev(dev, 2048, 4096); + dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce); } return 0; } @@ -267,14 +274,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev) return 0; } -int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) -{ - dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", - __func__, dma_addr, size); - return (dev->bus == &pci_bus_type) && - ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); -} - int dma_set_coherent_mask(struct device *dev, u64 mask) { if (mask >= PHYS_OFFSET + SZ_64M - 1) diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 9c49a46a2b7..0569de6acfb 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; } +#endif +#ifdef CONFIG_DMABOUNCE +/* + * According to the "Intel StrongARM SA-1111 Microprocessor Companion + * Chip Specification Update" (June 2000), erratum #7, there is a + * significant bug in the SA1111 SDRAM shared memory controller. If + * an access to a region of memory above 1MB relative to the bank base, + * it is important that address bit 10 _NOT_ be asserted. Depending + * on the configuration of the RAM, bit 10 may correspond to one + * of several different (processor-relative) address bits. + * + * This routine only identifies whether or not a given DMA address + * is susceptible to the bug. + * + * This should only get called for sa1111_device types due to the + * way we configure our device dma_masks. 
+ */ +static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) +{ + /* + * Section 4.6 of the "Intel StrongARM SA-1111 Development Module + * User's Guide" mentions that jumpers R51 and R52 control the + * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or + * SDRAM bank 1 on Neponset). The default configuration selects + * Assabet, so any address in bank 1 is necessarily invalid. + */ + return (machine_is_assabet() || machine_is_pfs168()) && + (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); +} #endif static void sa1111_dev_release(struct device *_dev) @@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, dev->dev.dma_mask = &dev->dma_mask; if (dev->dma_mask != 0xffffffffUL) { - ret = dmabounce_register_dev(&dev->dev, 1024, 4096); + ret = dmabounce_register_dev(&dev->dev, 1024, 4096, + sa1111_needs_bounce); if (ret) { dev_err(&dev->dev, "SA1111: Failed to register" " with dmabounce\n"); @@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip) kfree(sachip); } -/* - * According to the "Intel StrongARM SA-1111 Microprocessor Companion - * Chip Specification Update" (June 2000), erratum #7, there is a - * significant bug in the SA1111 SDRAM shared memory controller. If - * an access to a region of memory above 1MB relative to the bank base, - * it is important that address bit 10 _NOT_ be asserted. Depending - * on the configuration of the RAM, bit 10 may correspond to one - * of several different (processor-relative) address bits. - * - * This routine only identifies whether or not a given DMA address - * is susceptible to the bug. - * - * This should only get called for sa1111_device types due to the - * way we configure our device dma_masks. - */ -int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) -{ - /* - * Section 4.6 of the "Intel StrongARM SA-1111 Development Module - * User's Guide" mentions that jumpers R51 and R52 control the - * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or - * SDRAM bank 1 on Neponset). The default configuration selects - * Assabet, so any address in bank 1 is necessarily invalid. - */ - return ((machine_is_assabet() || machine_is_pfs168()) && - (addr >= 0xc8000000 || (addr + size) >= 0xc8000000)); -} - struct sa1111_save_data { unsigned int skcr; unsigned int skpcr; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index d2903d0c308..4ad25337f92 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -256,14 +256,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, * @dev: valid struct device pointer * @small_buf_size: size of buffers to use with small buffer pool * @large_buf_size: size of buffers to use with large buffer pool (can be 0) + * @needs_bounce_fn: called to determine whether buffer needs bouncing * * This function should be called by low-level platform code to register * a device as requireing DMA buffer bouncing. The function will allocate * appropriate DMA pools for the device. 
- * */ extern int dmabounce_register_dev(struct device *, unsigned long, - unsigned long); + unsigned long, int (*)(struct device *, dma_addr_t, size_t)); /** * dmabounce_unregister_dev @@ -277,24 +277,6 @@ extern int dmabounce_register_dev(struct device *, unsigned long, */ extern void dmabounce_unregister_dev(struct device *); -/** - * dma_needs_bounce - * - * @dev: valid struct device pointer - * @dma_handle: dma_handle of unbounced buffer - * @size: size of region being mapped - * - * Platforms that utilize the dmabounce mechanism must implement - * this function. - * - * The dmabounce routines call this function whenever a dma-mapping - * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the buffer is OK for - * DMA access and 1 if the buffer needs to be bounced. - * - */ -extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); - /* * The DMA API, implemented by dmabounce.c. See below for descriptions. */ diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index e9a58939572..d7db10cfe53 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r } +static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + return dev->bus == &pci_bus_type && (dma_addr + size) >= SZ_64M; +} + /* * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. */ @@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev) if(dev->bus == &pci_bus_type) { *dev->dma_mask = SZ_64M - 1; dev->coherent_dma_mask = SZ_64M - 1; - dmabounce_register_dev(dev, 2048, 4096); + dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); } return 0; } @@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev) return 0; } -int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) -{ - return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); -} - void __init ixp4xx_pci_preinit(void) { unsigned long cpuid = read_cpuid_id(); -- cgit v1.2.3-18-g5258 From 5c8598fca1a359520623ed5409d23bb9e23d7da3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 4 Jul 2011 08:34:33 +0100 Subject: ARM: dmabounce: no need to check dev->bus type in needs_bounce function As the needs_bounce function is passed at DMA bounce register time, we already know what the device bus type is, so we don't need to check it each time the needs_bounce function is called. 
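The pattern, sketched with the ixp4xx values from the hunk below (the helper names are the real ones, the wrapper is simplified): the bus test runs once at registration time, so the per-mapping callback only has to look at the address window.

/* Sketch: decide about the bus when registering, not on every mapping. */
static int ixp4xx_needs_bounce_sketch(struct device *dev, dma_addr_t dma_addr,
				      size_t size)
{
	return (dma_addr + size) >= SZ_64M;	/* bus already known to be PCI */
}

static int ixp4xx_platform_notify_sketch(struct device *dev)
{
	if (dev->bus != &pci_bus_type)		/* checked once, here */
		return 0;

	*dev->dma_mask = SZ_64M - 1;
	dev->coherent_dma_mask = SZ_64M - 1;
	dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce_sketch);
	return 0;
}
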
Signed-off-by: Russell King --- arch/arm/common/it8152.c | 3 +-- arch/arm/mach-ixp4xx/common-pci.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 80b49e1e099..14ad62e16dd 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -247,8 +247,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s { dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", __func__, dma_addr, size); - return dev->bus == &pci_bus_type && - (dma_addr + size - PHYS_OFFSET) >= SZ_64M; + return (dma_addr + size - PHYS_OFFSET) >= SZ_64M; } /* diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index d7db10cfe53..e2e98bbb641 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -318,7 +318,7 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) { - return dev->bus == &pci_bus_type && (dma_addr + size) >= SZ_64M; + return (dma_addr + size) >= SZ_64M; } /* -- cgit v1.2.3-18-g5258 From 07ad6ab3d79ede41cd8a69499e81df7b405635d2 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Mon, 4 Jul 2011 03:56:15 -0700 Subject: omap: drop __initdata tags from static struct platform_device declarations Pointers to statically declared platform device structures which are registered with platform_device_register() are then used during run time to access these structure members, for example from platform_uevent() and much more. Therefore, these structures should never be placed inside sections which are dropped after boot. Fix platform devices incorrectly tagged with __initdata which happen to exist inside OMAP sub-trees. This bug has exhibited itself on my ARM/OMAP1 based Amstrad Delta videophone after commit 6d3163ce86dd386b4f7bda80241d7fea2bc0bb1d, "mm: check if any page in a pageblock is reserved before marking it MIGRATE_RESERVE", resulting in reading from several /sys/device/platform/*/uevent files always ending up with segmentation faults. 
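For illustration, a sketch of the pattern being fixed, using a placeholder foo device rather than code from the patch; the __initdata tag lets the kernel reclaim the object after boot even though the driver core still holds a pointer to it:

        /* broken: freed after init, later dereferenced via sysfs/uevent */
        static struct platform_device foo_device __initdata = {
                .name   = "foo",
                .id     = -1,
        };

        /* correct: a registered platform_device must outlive boot */
        static struct platform_device foo_device = {
                .name   = "foo",
                .id     = -1,
        };

        platform_device_register(&foo_device);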
Signed-off-by: Janusz Krzysztofik Acked-by: Felipe Balbi Cc: Varadarajan, Charulatha Cc: Jarkko Nikula Signed-off-by: Tony Lindgren --- arch/arm/mach-omap1/board-ams-delta.c | 8 ++++---- arch/arm/mach-omap1/gpio15xx.c | 4 ++-- arch/arm/mach-omap1/gpio16xx.c | 10 +++++----- arch/arm/mach-omap1/gpio7xx.c | 14 +++++++------- arch/arm/mach-omap2/board-rx51-peripherals.c | 2 +- 5 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index de88c9297b6..f49ce85d244 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = { .delay = 9, }; -static struct platform_device ams_delta_kp_device __initdata = { +static struct platform_device ams_delta_kp_device = { .name = "omap-keypad", .id = -1, .dev = { @@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = { .resource = ams_delta_kp_resources, }; -static struct platform_device ams_delta_lcd_device __initdata = { +static struct platform_device ams_delta_lcd_device = { .name = "lcd_ams_delta", .id = -1, }; -static struct platform_device ams_delta_led_device __initdata = { +static struct platform_device ams_delta_led_device = { .name = "ams-delta-led", .id = -1 }; @@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = { .power = ams_delta_camera_power, }; -static struct platform_device ams_delta_camera_device __initdata = { +static struct platform_device ams_delta_camera_device = { .name = "soc-camera-pdrv", .id = 0, .dev = { diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c index 04c4b04cf54..364137c2042 100644 --- a/arch/arm/mach-omap1/gpio15xx.c +++ b/arch/arm/mach-omap1/gpio15xx.c @@ -41,7 +41,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = { .bank_stride = 1, }; -static struct __initdata platform_device omap15xx_mpu_gpio = { +static struct platform_device omap15xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -70,7 +70,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = { .bank_width = 16, }; -static struct __initdata platform_device omap15xx_gpio = { +static struct platform_device omap15xx_gpio = { .name = "omap_gpio", .id = 1, .dev = { diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c index 5dd0d4c82b2..293a246e282 100644 --- a/arch/arm/mach-omap1/gpio16xx.c +++ b/arch/arm/mach-omap1/gpio16xx.c @@ -44,7 +44,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { .bank_stride = 1, }; -static struct __initdata platform_device omap16xx_mpu_gpio = { +static struct platform_device omap16xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -73,7 +73,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { .bank_width = 16, }; -static struct __initdata platform_device omap16xx_gpio1 = { +static struct platform_device omap16xx_gpio1 = { .name = "omap_gpio", .id = 1, .dev = { @@ -102,7 +102,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = { .bank_width = 16, }; -static struct __initdata platform_device omap16xx_gpio2 = { +static struct platform_device omap16xx_gpio2 = { .name = "omap_gpio", .id = 2, .dev = { @@ -131,7 +131,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = { .bank_width = 16, }; -static struct __initdata platform_device omap16xx_gpio3 = { 
+static struct platform_device omap16xx_gpio3 = { .name = "omap_gpio", .id = 3, .dev = { @@ -160,7 +160,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = { .bank_width = 16, }; -static struct __initdata platform_device omap16xx_gpio4 = { +static struct platform_device omap16xx_gpio4 = { .name = "omap_gpio", .id = 4, .dev = { diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c index 1204c8b871a..c6ad248d63a 100644 --- a/arch/arm/mach-omap1/gpio7xx.c +++ b/arch/arm/mach-omap1/gpio7xx.c @@ -46,7 +46,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = { .bank_stride = 2, }; -static struct __initdata platform_device omap7xx_mpu_gpio = { +static struct platform_device omap7xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -75,7 +75,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio1 = { +static struct platform_device omap7xx_gpio1 = { .name = "omap_gpio", .id = 1, .dev = { @@ -104,7 +104,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio2 = { +static struct platform_device omap7xx_gpio2 = { .name = "omap_gpio", .id = 2, .dev = { @@ -133,7 +133,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio3 = { +static struct platform_device omap7xx_gpio3 = { .name = "omap_gpio", .id = 3, .dev = { @@ -162,7 +162,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio4 = { +static struct platform_device omap7xx_gpio4 = { .name = "omap_gpio", .id = 4, .dev = { @@ -191,7 +191,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio5 = { +static struct platform_device omap7xx_gpio5 = { .name = "omap_gpio", .id = 5, .dev = { @@ -220,7 +220,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = { .bank_width = 32, }; -static struct __initdata platform_device omap7xx_gpio6 = { +static struct platform_device omap7xx_gpio6 = { .name = "omap_gpio", .id = 6, .dev = { diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 990366726c5..88bd6f7705f 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module = .subdev_board_info = &rx51_si4713_board_info, }; -static struct platform_device rx51_si4713_dev __initdata_or_module = { +static struct platform_device rx51_si4713_dev = { .name = "radio-si4713", .id = -1, .dev = { -- cgit v1.2.3-18-g5258 From 64393b3ae4e3cc86e2d622f682d736ec973364b6 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 1 Jul 2011 12:25:24 +0200 Subject: AT91: Change nand buswidth logic to match hardware default configuration The recently modified nand buswitth configuration is not aligned with board reality: the double footprint on boards is always populated with 8bits buswidth nand flashes. So we have to consider that without particular configuration the 8bits buswidth is selected by default. 
Moreover, the previous logic was always using !board_have_nand_8bit(), we change it to a simpler: board_have_nand_16bit(). Signed-off-by: Nicolas Ferre Tested-by: Ludovic Desroches Signed-off-by: Arnd Bergmann --- arch/arm/mach-at91/board-cap9adk.c | 2 +- arch/arm/mach-at91/board-sam9260ek.c | 2 +- arch/arm/mach-at91/board-sam9261ek.c | 2 +- arch/arm/mach-at91/board-sam9263ek.c | 2 +- arch/arm/mach-at91/board-sam9g20ek.c | 2 +- arch/arm/mach-at91/board-sam9m10g45ek.c | 2 +- arch/arm/mach-at91/include/mach/system_rev.h | 10 +++++----- 7 files changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c index 1904fdf8761..cdb65d48325 100644 --- a/arch/arm/mach-at91/board-cap9adk.c +++ b/arch/arm/mach-at91/board-cap9adk.c @@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void) csa = at91_sys_read(AT91_MATRIX_EBICSA); at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V); - cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit(); + cap9adk_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (cap9adk_nand_data.bus_width_16) cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index d600dc12322..5c240743c5b 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index f897f84d43d..b60c22b6e24 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 605b26f40a4..9bbdc92ea19 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index 7624cf0d006..1325a50101a 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c @@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c 
b/arch/arm/mach-at91/board-sam9m10g45ek.c index 063c95d0e8f..33eaa135f24 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h index b855ee75f72..8f4866045b4 100644 --- a/arch/arm/mach-at91/include/mach/system_rev.h +++ b/arch/arm/mach-at91/include/mach/system_rev.h @@ -13,13 +13,13 @@ * the 16-31 bit are reserved for at91 generic information * * bit 31: - * 0 => nand 16 bit - * 1 => nand 8 bit + * 0 => nand 8 bit + * 1 => nand 16 bit */ -#define BOARD_HAVE_NAND_8BIT (1 << 31) -static int inline board_have_nand_8bit(void) +#define BOARD_HAVE_NAND_16BIT (1 << 31) +static inline int board_have_nand_16bit(void) { - return system_rev & BOARD_HAVE_NAND_8BIT; + return system_rev & BOARD_HAVE_NAND_16BIT; } #endif /* __ARCH_SYSTEM_REV_H__ */ -- cgit v1.2.3-18-g5258 From f4f38430c94c38187db73a2cf3892cc8b12a2713 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Jul 2011 14:38:12 +0100 Subject: ARM: 6989/1: perf: do not start the PMU when no events are present armpmu_enable can be called in situations where no events are present (for example, from the event rotation tick after a profiled task has exited). In this case, we currently start the PMU anyway which may leave it active inevitably without any events being monitored. This patch adds a simple check to the enabling code so that we avoid starting the PMU when no events are present. Cc: Reported-by: Ashwin Chaugle Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d53c0abc4dd..2b5b1421596 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event) static void armpmu_enable(struct pmu *pmu) { /* Enable all of the perf events on hardware. */ - int idx; + int idx, enabled = 0; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!armpmu) @@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu) continue; armpmu->enable(&event->hw, idx); + enabled = 1; } - armpmu->start(); + if (enabled) + armpmu->start(); } static void armpmu_disable(struct pmu *pmu) -- cgit v1.2.3-18-g5258 From 0371d3f7e8f1cddaee1f215e42c09a40e235d810 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 5 Jul 2011 19:58:29 +0100 Subject: ARM: move memory layout sanity checking before meminfo initialization Ensure that the meminfo array is sanity checked before we pass the memory to memblock. This helps to ensure that memblock and meminfo agree on the dimensions of memory, especially when more memory is passed than the kernel can deal with. 
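Reduced to the relevant calls, the resulting order in setup_arch() after this change is sketched below (a simplification of the diff that follows, not a complete listing):

        parse_early_param();
        sanity_check_meminfo();                 /* clamp and validate the meminfo banks first */
        arm_memblock_init(&meminfo, mdesc);     /* then hand the checked banks to memblock */
        paging_init(mdesc);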
Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 2 ++ arch/arm/mm/mmu.c | 5 +++-- arch/arm/mm/nommu.c | 4 ++++ 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05..acbb447ac6b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup); #endif extern void paging_init(struct machine_desc *desc); +extern void sanity_check_meminfo(void); extern void reboot_setup(char *str); unsigned int processor_id; @@ -900,6 +901,7 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); paging_init(mdesc); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d9e736c2b4..594d677b92c 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc); static phys_addr_t lowmem_limit __initdata = 0; -static void __init sanity_check_meminfo(void) +void __init sanity_check_meminfo(void) { int i, j, highmem = 0; @@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; + memblock_set_current_limit(lowmem_limit); + build_mem_type_table(); - sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(mdesc); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 687d02319a4..941a98c9e8a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void) memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } +void __init sanity_check_meminfo(void) +{ +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. -- cgit v1.2.3-18-g5258 From a0d8efedb203b5b908dd46cea38201761e2380f9 Mon Sep 17 00:00:00 2001 From: Thomas Abraham Date: Thu, 16 Jun 2011 16:12:35 +0900 Subject: ARM: EXYNOS4: Fix card detection for sdhci 0 and 2 On SMDKV310 board, a card detect gpio pin is available that is directly connected to the io pad of the sdhci controller. Fix incorrect value of cd_type field in platform data for sdhci instance 0 and 2. 
Signed-off-by: Thomas Abraham Signed-off-by: Kukjin Kim --- arch/arm/mach-exynos4/mach-smdkv310.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c index 152676471b6..edd814110da 100644 --- a/arch/arm/mach-exynos4/mach-smdkv310.c +++ b/arch/arm/mach-exynos4/mach-smdkv310.c @@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = { }; static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { - .cd_type = S3C_SDHCI_CD_GPIO, - .ext_cd_gpio = EXYNOS4_GPK0(2), - .ext_cd_gpio_invert = 1, + .cd_type = S3C_SDHCI_CD_INTERNAL, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, #ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT .max_width = 8, @@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = { }; static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { - .cd_type = S3C_SDHCI_CD_GPIO, - .ext_cd_gpio = EXYNOS4_GPK2(2), - .ext_cd_gpio_invert = 1, + .cd_type = S3C_SDHCI_CD_INTERNAL, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, #ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT .max_width = 8, -- cgit v1.2.3-18-g5258
From a3df1d4811bb7710d6497334e3b6a37064527684 Mon Sep 17 00:00:00 2001 From: Naveen Krishna Chatradhi Date: Thu, 16 Jun 2011 19:33:29 +0900 Subject: ARM: EXYNOS4: fix improper gpio configuration These pins are incorrectly configured for PCM2; configure them to SPDIF(_OUT & _EXT_CLK). Signed-off-by: Naveen Krishna Chatradhi Acked-by: Jassi Brar Signed-off-by: Kukjin Kim --- arch/arm/mach-exynos4/dev-audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c index 1eed5f9f7bd..983069a5323 100644 --- a/arch/arm/mach-exynos4/dev-audio.c +++ b/arch/arm/mach-exynos4/dev-audio.c @@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = { static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) { - s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3)); + s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4)); return 0; } -- cgit v1.2.3-18-g5258
From bb8bb57b213f63ffba29b3a7f1c7974782b8127d Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Wed, 22 Jun 2011 18:03:21 +0900 Subject: ARM: SAMSUNG: header file revised to prevent duplicated declarations There has been no #ifndef - #define - #endif protection for this header file. Signed-off-by: MyungJoo Ham Signed-off-by: Kukjin Kim --- arch/arm/plat-samsung/include/plat/devs.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h index 4af108ff411..e3b31c26ac3 100644 --- a/arch/arm/plat-samsung/include/plat/devs.h +++ b/arch/arm/plat-samsung/include/plat/devs.h @@ -12,6 +12,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + +#ifndef __PLAT_DEVS_H +#define __PLAT_DEVS_H __FILE__ + #include struct s3c24xx_uart_resources { @@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97; */ extern void *s3c_set_platdata(void *pd, size_t pdsize, struct platform_device *pdev); + +#endif /* __PLAT_DEVS_H */ -- cgit v1.2.3-18-g5258
From 27ea7fd2889eb93041480b18de655b1ff003e05d Mon Sep 17 00:00:00 2001 From: Sangbeom Kim Date: Thu, 23 Jun 2011 21:43:58 +0900 Subject: ARM: S5P: Fix bug on init of PWMTimers for HRTimer This patch fixes the following:
<6>[ 0.000000] sched_clock: 32 bits at 33MHz, ... <6>[ 128.651309] Calibrating delay loop... There is a big jump. The reason is that the PWM timer backing the HRTimer was used before it was initialized. So this patch changes the initialization order; the kernel boot log after the change follows. <6>[ 0.000000] sched_clock: 32 bits at 33MHz, ... <6>[ 0.000088] Calibrating delay loop... Signed-off-by: Sangbeom Kim Signed-off-by: Kukjin Kim --- arch/arm/plat-s5p/s5p-time.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c index 899a8cc011f..612934c48b0 100644 --- a/arch/arm/plat-s5p/s5p-time.c +++ b/arch/arm/plat-s5p/s5p-time.c @@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void) clock_rate = clk_get_rate(tin_source); - init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); - s5p_time_setup(timer_source.source_id, TCNT_MAX); s5p_time_start(timer_source.source_id, PERIODIC); + init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); + if (clocksource_register_hz(&time_clocksource, clock_rate)) panic("%s: can't register clocksource\n", time_clocksource.name); } -- cgit v1.2.3-18-g5258
From 152c036a8096ed219c965957acb63490a2c0c963 Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Mon, 4 Jul 2011 19:03:54 +0900 Subject: ARM: EXYNOS4: Address a section mismatch w/ suspend issue. The section mismatch in headsmp.S made hotplug stop working after the first instance of suspend-to-RAM and its wakeup. Signed-off-by: MyungJoo Ham Signed-off-by: Kyungmin Park Signed-off-by: Kukjin Kim --- arch/arm/mach-exynos4/headsmp.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S index 6c6cfc50c46..3cdeb364754 100644 --- a/arch/arm/mach-exynos4/headsmp.S +++ b/arch/arm/mach-exynos4/headsmp.S @@ -13,7 +13,7 @@ #include #include - __INIT + __CPUINIT /* * exynos4 specific entry point for secondary CPUs.
This provides -- cgit v1.2.3-18-g5258 From 8918034dfb7b0f625ba9eb0329d5750a9573f62e Mon Sep 17 00:00:00 2001 From: Padmavathi Venna Date: Tue, 5 Jul 2011 17:13:56 +0900 Subject: ARM: SAMSUNG: Add tx_st_done variable tx_st_done is required for checking the transmission status of SPI channels with different fifo levels Signed-off-by: Padmavathi Venna Acked-by: Jassi Brar Signed-off-by: Kukjin Kim --- arch/arm/mach-s3c64xx/dev-spi.c | 2 ++ arch/arm/mach-s5p64x0/dev-spi.c | 4 ++++ arch/arm/mach-s5pc100/dev-spi.c | 3 +++ arch/arm/mach-s5pv210/dev-spi.c | 2 ++ arch/arm/plat-samsung/include/plat/s3c64xx-spi.h | 2 ++ 5 files changed, 13 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index 82db072cb83..5e6b42089eb 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c @@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = { .cfg_gpio = s3c64xx_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, + .tx_st_done = 21, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = { .cfg_gpio = s3c64xx_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, + .tx_st_done = 21, }; struct platform_device s3c64xx_device_spi1 = { diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c index e78ee18c76e..ac825e82632 100644 --- a/arch/arm/mach-s5p64x0/dev-spi.c +++ b/arch/arm/mach-s5p64x0/dev-spi.c @@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = { .cfg_gpio = s5p6440_spi_cfg_gpio, .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static struct s3c64xx_spi_info s5p6450_spi0_pdata = { .cfg_gpio = s5p6450_spi_cfg_gpio, .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = { .cfg_gpio = s5p6440_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static struct s3c64xx_spi_info s5p6450_spi1_pdata = { .cfg_gpio = s5p6450_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, + .tx_st_done = 25, }; struct platform_device s5p64x0_device_spi1 = { diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c index 57b19794d9b..2b58c9a2cf7 100644 --- a/arch/arm/mach-s5pc100/dev-spi.c +++ b/arch/arm/mach-s5pc100/dev-spi.c @@ -90,6 +90,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -134,6 +135,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; struct platform_device s5pc100_device_spi1 = { @@ -176,6 +178,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; struct platform_device s5pc100_device_spi2 = { diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c index e3249a47e3b..eaf9a7bff7a 100644 --- a/arch/arm/mach-s5pv210/dev-spi.c +++ b/arch/arm/mach-s5pv210/dev-spi.c @@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = { .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, .high_speed = 1, + .tx_st_done = 25, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = { .fifo_lvl_mask = 0x7f, 
.rx_lvl_offset = 15, .high_speed = 1, + .tx_st_done = 25, }; struct platform_device s5pv210_device_spi1 = { diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h index 0ffe34a2155..4c16fa3621b 100644 --- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h +++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h @@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo { * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number * @high_speed: If the controller supports HIGH_SPEED_EN bit + * @tx_st_done: Depends on tx fifo_lvl field */ struct s3c64xx_spi_info { int src_clk_nr; @@ -53,6 +54,7 @@ struct s3c64xx_spi_info { int fifo_lvl_mask; int rx_lvl_offset; int high_speed; + int tx_st_done; }; /** -- cgit v1.2.3-18-g5258 From 8fa9dd04b7f7ab1807ebcc274601dd8e2d215569 Mon Sep 17 00:00:00 2001 From: Padmavathi Venna Date: Wed, 6 Jul 2011 15:37:08 +0900 Subject: ARM: S5PC100: Fix for compilation error S5PC100 Compilation fails without this patch Signed-off-by: Padmavathi Venna Signed-off-by: Kukjin Kim --- arch/arm/mach-s5pc100/dev-spi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c index 57b19794d9b..cd7ca47d874 100644 --- a/arch/arm/mach-s5pc100/dev-spi.c +++ b/arch/arm/mach-s5pc100/dev-spi.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3-18-g5258 From 5f27275edb7082505eaac1c85a15620207351b63 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Wed, 6 Jul 2011 16:04:09 +0900 Subject: ARM: EXYNOS4: Set appropriate I2C device variant Set up a correct I2C bus controller variant name for Exynos4. Without this change the I2C bus driver fails to acquire its clocks. Signed-off-by: Sylwester Nawrocki Signed-off-by: Kyungmin Park Signed-off-by: Kukjin Kim --- arch/arm/mach-exynos4/cpu.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c index 9babe4473e8..bfd621460ab 100644 --- a/arch/arm/mach-exynos4/cpu.c +++ b/arch/arm/mach-exynos4/cpu.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -132,6 +133,11 @@ void __init exynos4_map_io(void) s3c_fimc_setname(1, "exynos4-fimc"); s3c_fimc_setname(2, "exynos4-fimc"); s3c_fimc_setname(3, "exynos4-fimc"); + + /* The I2C bus controllers are directly compatible with s3c2440 */ + s3c_i2c0_setname("s3c2440-i2c"); + s3c_i2c1_setname("s3c2440-i2c"); + s3c_i2c2_setname("s3c2440-i2c"); } void __init exynos4_init_clocks(int xtal) -- cgit v1.2.3-18-g5258 From beb0c9b056b1c23d2029b46a425362e9ccbeba01 Mon Sep 17 00:00:00 2001 From: Paul Parsons Date: Sun, 8 May 2011 01:54:33 +0000 Subject: ARM: pxa: fix PGSR register address calculation The file mfp-pxa2xx.c defines a macro, PGSR(), which translates a gpio bank number to a PGSR register address. The function pxa2xx_mfp_suspend() erroneously passed in a gpio number instead of a gpio bank number. 
Signed-off-by: Paul Parsons Cc: stable@kernel.org Signed-off-by: Eric Miao --- arch/arm/mach-pxa/mfp-pxa2xx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 87ae3129f4f..b27544bcafc 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void) if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && (GPDR(i) & GPIO_bit(i))) { if (GPLR(i) & GPIO_bit(i)) - PGSR(i) |= GPIO_bit(i); + PGSR(gpio_to_bank(i)) |= GPIO_bit(i); else - PGSR(i) &= ~GPIO_bit(i); + PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i); } } -- cgit v1.2.3-18-g5258 From d204b2c5b16df935fa9a546c528e168859fddcc0 Mon Sep 17 00:00:00 2001 From: Lei Wen Date: Tue, 21 Jun 2011 02:54:18 -0700 Subject: ARM: pxa910: correct nand pmu setting The original pair of <0x01db, 208000000> is invalid. Correct to the valid value. Signed-off-by: Lei Wen Cc: stable@kernel.org Signed-off-by: Eric Miao --- arch/arm/mach-mmp/pxa910.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c index 8f92ccd26ed..1464607aa60 100644 --- a/arch/arm/mach-mmp/pxa910.c +++ b/arch/arm/mach-mmp/pxa910.c @@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000); static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); -static APMU_CLK(nand, NAND, 0x01db, 208000000); +static APMU_CLK(nand, NAND, 0x19b, 156000000); static APMU_CLK(u2o, USB, 0x1b, 480000000); /* device and clock bindings */ -- cgit v1.2.3-18-g5258 From 6662498e132dfa758925a160fd5ef80a083651c3 Mon Sep 17 00:00:00 2001 From: Lei Wen Date: Tue, 21 Jun 2011 05:37:47 -0700 Subject: ARM: pxa168: correct nand pmu setting The original pair of <0x01db, 208000000> is invalid. Correct it to the valid value. The 6th bit of the NFC APMU register indicates NFC works whether at 156Mhz or 78Mhz. So 0x19b indicates NFC works at 156Mhz, and 0x1db indicates it works at 78Mhz. Signed-off-by: Lei Wen Cc: stable@kernel.org Signed-off-by: Eric Miao --- arch/arm/mach-mmp/pxa168.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index 72b4e763158..ab9f999106c 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0); static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); static APBC_CLK(keypad, PXA168_KPC, 0, 32000); -static APMU_CLK(nand, NAND, 0x01db, 208000000); +static APMU_CLK(nand, NAND, 0x19b, 156000000); static APMU_CLK(lcd, LCD, 0x7f, 312000000); /* device and clock bindings */ -- cgit v1.2.3-18-g5258 From 2eb5af44b1d22d7f7b715e0b9a4e516eb4451bf9 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 28 Jun 2011 09:53:20 +0100 Subject: ARM: 6979/1: mach-vt8500: add forgotten irq_data conversion This platform has not been converted to 'struct irq_data' when the big pile was done and fails to compile nowadays. Tested on a JayPC-Tablet. 
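The conversion follows the generic irq_data pattern sketched below with placeholder foo_ names (not code from this patch): each callback now receives a struct irq_data and derives the interrupt number from it, and the irq_chip methods gain an irq_ prefix.

        static void foo_irq_unmask(struct irq_data *d);
        static int foo_irq_set_type(struct irq_data *d, unsigned int flow_type);

        /* old style: static void foo_irq_mask(unsigned int irq); */
        static void foo_irq_mask(struct irq_data *d)
        {
                unsigned int irq = d->irq;
                /* ... mask the line exactly as before ... */
        }

        static struct irq_chip foo_irq_chip = {
                .name           = "foo",
                .irq_ack        = foo_irq_mask,
                .irq_mask       = foo_irq_mask,
                .irq_unmask     = foo_irq_unmask,
                .irq_set_type   = foo_irq_set_type,
        };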
Signed-off-by: Wolfram Sang Acked-by: Alexey Charkov Signed-off-by: Russell King --- arch/arm/mach-vt8500/irq.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c index 245140c0df1..642de0408f2 100644 --- a/arch/arm/mach-vt8500/irq.c +++ b/arch/arm/mach-vt8500/irq.c @@ -39,9 +39,10 @@ static void __iomem *ic_regbase; static void __iomem *sic_regbase; -static void vt8500_irq_mask(unsigned int irq) +static void vt8500_irq_mask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 edge; if (irq >= 64) { @@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq) } } -static void vt8500_irq_unmask(unsigned int irq) +static void vt8500_irq_unmask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 dctr; if (irq >= 64) { @@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq) writeb(dctr, base + VT8500_IC_DCTR + irq); } -static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) +static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) { void __iomem *base = ic_regbase; - unsigned int orig_irq = irq; + unsigned irq = d->irq; + unsigned orig_irq = irq; u8 dctr; if (irq >= 64) { @@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .ack = vt8500_irq_mask, - .mask = vt8500_irq_mask, - .unmask = vt8500_irq_unmask, - .set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_mask, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; void __init vt8500_init_irq(void) -- cgit v1.2.3-18-g5258 From 186dcaa448c0a7a99933efac2af225fc4fe82c53 Mon Sep 17 00:00:00 2001 From: Petr Štetiar Date: Fri, 17 Jun 2011 11:10:04 +0100 Subject: ARM: 6966/1: ep93xx: fix inverted RTS/DTR signals on uart1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was discovered by Roberto Bergo, that RTS/DTR signals are inverted after the boot, because it was causing him problems with hardware controlled modem connected on ttyAM0. Todd Valentic came with this patch for the issue. Discussion: http://tech.groups.yahoo.com/group/ts-7000/message/20259 Comments from Petr Štetiar: Sorry, but forget to add Acked-by[1]: 1. https://patchwork.kernel.org/patch/873052/ Cc: Ryan Mallon Cc: Hartley Sweeten Signed-off-by: Todd Valentic Tested-by: Roberto Bergo Signed-off-by: Petr Štetiar Acked-by: H Hartley Sweeten Signed-off-by: Russell King --- arch/arm/mach-ep93xx/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 1d4b65fd673..6659a0d137a 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -251,9 +251,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev, unsigned int mcr; mcr = 0; - if (!(mctrl & TIOCM_RTS)) + if (mctrl & TIOCM_RTS) mcr |= 2; - if (!(mctrl & TIOCM_DTR)) + if (mctrl & TIOCM_DTR) mcr |= 1; __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); -- cgit v1.2.3-18-g5258 From 38a8914f9ac2379293944f613e6ca24b61373de8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Jul 2011 14:36:19 +0100 Subject: ARM: 6987/1: l2x0: fix disabling function to avoid deadlock The l2x0_disable function attempts to writel with the l2x0_lock held. 
This results in deadlock when the writel contains an outer_sync call for the platform since the l2x0_lock is already held by the disable function. A further problem is that disabling the L2 without flushing it first can lead to the spin_lock operation becoming visible after the spin_unlock, causing any subsequent L2 maintenance to deadlock. This patch replaces the writel with a call to writel_relaxed in the disabling code and adds a flush before disabling in the control register, preventing livelock from occurring. Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index ef59099a546..44c086710d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -120,17 +120,22 @@ static void l2x0_cache_sync(void) spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_flush_all(void) +static void __l2x0_flush_all(void) { - unsigned long flags; - - /* clean all ways */ - spin_lock_irqsave(&l2x0_lock, flags); debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); debug_writel(0x00); +} + +static void l2x0_flush_all(void) +{ + unsigned long flags; + + /* clean all ways */ + spin_lock_irqsave(&l2x0_lock, flags); + __l2x0_flush_all(); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -266,7 +271,9 @@ static void l2x0_disable(void) unsigned long flags; spin_lock_irqsave(&l2x0_lock, flags); - writel(0, l2x0_base + L2X0_CTRL); + __l2x0_flush_all(); + writel_relaxed(0, l2x0_base + L2X0_CTRL); + dsb(); spin_unlock_irqrestore(&l2x0_lock, flags); } -- cgit v1.2.3-18-g5258 From 9715efb8dc9ffa629bf5a1215b11bf2f2f29908b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 1 Jul 2011 08:23:06 +0100 Subject: ARM: 6984/1: enhance TCM robustness The PB11MPCore reports "3" DTCM banks, but anything above 2 is an "undefined" value, so push this to become 0. Further add some checks if code is compiled to TCM even if there is no D/ITCM present in the system, and if we can really fit the compiled code. We don't do the BUG() since it's not helpful, it's better to deal with non-present TCM dynamically. If there is nothing compiled to the TCM and no TCM is detected, it will now just shut up even if TCM support is enabled. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/tcm.c | 47 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index f5cf660eefc..d402d482952 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -134,6 +134,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, (tcm_region & 1) ? 
"" : "not "); } + /* Not much fun you can do with a size 0 bank */ + if (tcm_size == 0) + return 0; + /* Force move the TCM bank to where we want it, enable */ tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; @@ -165,12 +169,20 @@ void __init tcm_init(void) u32 tcm_status = read_cpuid_tcmstatus(); u8 dtcm_banks = (tcm_status >> 16) & 0x03; u8 itcm_banks = (tcm_status & 0x03); + size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; + size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; char *start; char *end; char *ram; int ret; int i; + /* Values greater than 2 for D/ITCM banks are "reserved" */ + if (dtcm_banks > 2) + dtcm_banks = 0; + if (itcm_banks > 2) + itcm_banks = 0; + /* Setup DTCM if present */ if (dtcm_banks > 0) { for (i = 0; i < dtcm_banks; i++) { @@ -178,6 +190,13 @@ void __init tcm_init(void) if (ret) return; } + /* This means you compiled more code than fits into DTCM */ + if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { + pr_info("CPU DTCM: %u bytes of code compiled to " + "DTCM but only %lu bytes of DTCM present\n", + dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); + goto no_dtcm; + } dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; @@ -186,12 +205,15 @@ void __init tcm_init(void) start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; - /* This means you compiled more code than fits into DTCM */ - BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); - memcpy(start, ram, (end-start)); - pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); + memcpy(start, ram, dtcm_code_sz); + pr_debug("CPU DTCM: copied data from %p - %p\n", + start, end); + } else if (dtcm_code_sz) { + pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " + "DTCM banks present in CPU\n", dtcm_code_sz); } +no_dtcm: /* Setup ITCM if present */ if (itcm_banks > 0) { for (i = 0; i < itcm_banks; i++) { @@ -199,6 +221,13 @@ void __init tcm_init(void) if (ret) return; } + /* This means you compiled more code than fits into ITCM */ + if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { + pr_info("CPU ITCM: %u bytes of code compiled to " + "ITCM but only %lu bytes of ITCM present\n", + itcm_code_sz, (itcm_end - ITCM_OFFSET)); + return; + } itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); itcm_iomap[0].length = itcm_end - ITCM_OFFSET; @@ -207,10 +236,12 @@ void __init tcm_init(void) start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; - /* This means you compiled more code than fits into ITCM */ - BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); - memcpy(start, ram, (end-start)); - pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); + memcpy(start, ram, itcm_code_sz); + pr_debug("CPU ITCM: copied code from %p - %p\n", + start, end); + } else if (itcm_code_sz) { + pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " + "ITCM banks present in CPU\n", itcm_code_sz); } } -- cgit v1.2.3-18-g5258 From 201043f227576d42529ddb340746a060a00f57f6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 1 Jul 2011 08:23:36 +0100 Subject: ARM: 6985/1: export functions to determine the presence of I/DTCM By allowing code to detect whether DTCM or ITCM is present, code paths involving TCM can be avoided when running on platforms that lack it. This is good for creating single kernels across several archs, if some of them utilize TCM but others don't. 
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/tcm.h | 2 ++ arch/arm/kernel/tcm.c | 21 ++++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927..8578d726ad7 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h @@ -27,5 +27,7 @@ void *tcm_alloc(size_t len); void tcm_free(void *addr, size_t len); +bool tcm_dtcm_present(void); +bool tcm_itcm_present(void); #endif diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index d402d482952..30e302d33e0 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -19,6 +19,8 @@ #include "tcm.h" static struct gen_pool *tcm_pool; +static bool dtcm_present; +static bool itcm_present; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; @@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len) } EXPORT_SYMBOL(tcm_free); +bool tcm_dtcm_present(void) +{ + return dtcm_present; +} +EXPORT_SYMBOL(tcm_dtcm_present); + +bool tcm_itcm_present(void) +{ + return itcm_present; +} +EXPORT_SYMBOL(tcm_itcm_present); + static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, u32 *offset) { @@ -208,6 +222,7 @@ void __init tcm_init(void) memcpy(start, ram, dtcm_code_sz); pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); + dtcm_present = true; } else if (dtcm_code_sz) { pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " "DTCM banks present in CPU\n", dtcm_code_sz); @@ -239,6 +254,7 @@ no_dtcm: memcpy(start, ram, itcm_code_sz); pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); + itcm_present = true; } else if (itcm_code_sz) { pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " "ITCM banks present in CPU\n", itcm_code_sz); @@ -252,7 +268,6 @@ no_dtcm: */ static int __init setup_tcm_pool(void) { - u32 tcm_status = read_cpuid_tcmstatus(); u32 dtcm_pool_start = (u32) &__edtcm_data; u32 itcm_pool_start = (u32) &__eitcm_text; int ret; @@ -267,7 +282,7 @@ static int __init setup_tcm_pool(void) pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ - if (tcm_status & (0x03 << 16)) { + if (dtcm_present) { if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, dtcm_end - dtcm_pool_start, -1); @@ -284,7 +299,7 @@ static int __init setup_tcm_pool(void) } /* Add the rest of ITCM to the TCM pool */ - if (tcm_status & 0x03) { + if (itcm_present) { if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, itcm_end - itcm_pool_start, -1); -- cgit v1.2.3-18-g5258 From f022e4e41bb339a8bded1063e718d463904ec716 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 1 Jul 2011 08:24:09 +0100 Subject: ARM: 6986/1: mach-realview: add TCM support for PB1176 Enable TCM support on the RealView PB1176 - we have now taken the precautions necessary to support even multi-board builds of RealView systems with TCM enabled. 
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-realview/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index b9a9805e482..dba6d0c1fc1 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig @@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176 bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" select CPU_V6 select ARM_GIC + select HAVE_TCM help Include support for the ARM(R) RealView(R) Platform Baseboard for ARM1176JZF-S. -- cgit v1.2.3-18-g5258 From eca5dc2a0028d64229aef1b7839bcbae1d864c5f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Jul 2011 14:37:08 +0100 Subject: ARM: 6988/1: multi-cpu: remove arguments from CPU proc macros The macros for invoking functions via the processor struct in the MULTI_CPU case define the arguments as part of the macros, making it impossible to take the address of those functions. This patch removes the arguments from the macro definitions so that we can take the address of these functions like we can for the !MULTI_CPU case. Reported-by: Frank Hofmann Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/proc-fns.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8ec535e11fd..633d1cb84d8 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); #else -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() -#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) +#define cpu_proc_init processor._proc_init +#define cpu_proc_fin processor._proc_fin +#define cpu_reset processor.reset +#define cpu_do_idle processor._do_idle +#define cpu_dcache_clean_area processor.dcache_clean_area +#define cpu_set_pte_ext processor.set_pte_ext +#define cpu_do_switch_mm processor.switch_mm #endif extern void cpu_resume(void); -- cgit v1.2.3-18-g5258 From 10cdc7e5129ac26aefece4c7a7d5b1a0285237c2 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 13 Jun 2011 15:28:53 +0100 Subject: ARM: 6960/1: allow enabling SCU code on UP The scu_power_mode function can be used on UP builds as it drives signals to an SOC power controller. So make it selectable for !SMP. 
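A sketch of the intended use on a uniprocessor SoC; the MACH_FOO symbol, the scu_base pointer and the SCU_PM_POWEROFF constant are assumptions (the constant comes from <asm/smp_scu.h> of this era and is not shown in this patch):

        config MACH_FOO
                bool "Foo single-core board"
                select HAVE_ARM_SCU

        /* then, e.g. in the platform's low-power entry path: */
        scu_power_mode(scu_base, SCU_PM_POWEROFF);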
Signed-off-by: Rob Herring Signed-off-by: Russell King --- arch/arm/Kconfig | 1 - arch/arm/kernel/smp_scu.c | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cd8f520dd03..84fda2bebd7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1349,7 +1349,6 @@ config SMP_ON_UP config HAVE_ARM_SCU bool - depends on SMP help This option enables support for the ARM system coherency unit diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index a1e757c3439..79ed5e7f204 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -20,6 +20,7 @@ #define SCU_INVALIDATE 0x0c #define SCU_FPGA_REVISION 0x10 +#ifdef CONFIG_SMP /* * Get the number of CPU cores from the SCU configuration */ @@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base) */ flush_cache_all(); } +#endif /* * Set the executing CPUs power mode as defined. This will be in -- cgit v1.2.3-18-g5258 From 7fa22bd5460bb2021729fa5a1012c60b9b3a56e2 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 7 Jul 2011 01:56:51 +0100 Subject: ARM: 6993/1: platsmp: Allow secondary cpu hotplug with maxcpus=1 If an ARM system has multiple cpus in the same socket and the kernel is booted with maxcpus=1, secondary cpus are possible but not present due to how platform_smp_prepare_cpus() is called. Since most typical ARM processors don't actually support physical hotplug, initialize the present map to be equal to the possible map in generic ARM SMP code. Also, always call platform_smp_prepare_cpus() as long as max_cpus is non-zero (0 means no SMP) to allow platform code to do any SMP setup. After applying this patch it's possible to boot an ARM system with maxcpus=1 on the command line and then hotplug in secondary cpus via sysfs. This is more in line with how x86 does things. Signed-off-by: Stephen Boyd Cc: Paul Mundt Cc: Kukjin Kim Cc: David Brown Cc: Tony Lindgren Cc: Srinidhi Kasagar Cc: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 11 +++++++++-- arch/arm/mach-exynos4/platsmp.c | 8 -------- arch/arm/mach-msm/platsmp.c | 8 -------- arch/arm/mach-omap2/omap-smp.c | 8 -------- arch/arm/mach-realview/platsmp.c | 8 -------- arch/arm/mach-shmobile/platsmp.c | 5 ----- arch/arm/mach-tegra/platsmp.c | 8 -------- arch/arm/mach-ux500/platsmp.c | 8 -------- arch/arm/mach-vexpress/ct-ca9x4.c | 4 ---- 9 files changed, 9 insertions(+), 59 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 344e52b16c8..0ffcf5c0da4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -361,14 +361,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus) */ if (max_cpus > ncores) max_cpus = ncores; - - if (max_cpus > 1) { + if (ncores > 1 && max_cpus) { /* * Enable the local timer or broadcast device for the * boot CPU, but only if we have more than one CPU. */ percpu_timer_setup(); + /* + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. A platform should + * re-initialize the map in platform_smp_prepare_cpus() if + * present != possible (e.g. physical hotplug). + */ + init_cpu_present(&cpu_possible_map); + /* * Initialise the SCU if there are more than one CPU * and let them know where to start. 
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index c5e65a02be8..b68d5bdf04c 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c @@ -154,14 +154,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 2034098cf01..315b9f36532 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c @@ -157,12 +157,4 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); } diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ecfe93c4b58..ce65e9329c7 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -125,14 +125,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); /* * Initialise the SCU and wake up the secondary core using diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index 963bf0d8119..4ae943bafa9 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c @@ -68,14 +68,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3888feb1c6..66f980625a3 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c @@ -64,10 +64,5 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); - shmobile_smp_prepare_cpus(); } diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index b8ae3c978de..1a594dce8fb 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c @@ -129,14 +129,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. - */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base); } diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 0c527fe2ceb..a33df5f4c27 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -172,14 +172,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - int i; - - /* - * Initialise the present map, which describes the set of CPUs - * actually populated at the present time. 
- */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); scu_enable(scu_base_addr()); wakeup_secondary(); diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 765a71ff7f3..bfd32f52c2d 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void) static void ct_ca9x4_smp_enable(unsigned int max_cpus) { - int i; - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); - scu_enable(MMIO_P2V(A9_MPCORE_SCU)); } #endif -- cgit v1.2.3-18-g5258 From 90c5ffe592ff3b33afe2bdfe5e9ec630fc599e32 Mon Sep 17 00:00:00 2001 From: Vitaly Kuzmichev Date: Thu, 7 Jul 2011 14:56:05 +0100 Subject: ARM: 6994/1: smp_twd: Fix typo in 'twd_timer_rate' printing To get hundredths of MHz the rate needs to be divided by 10'000. Here is an example: twd_timer_rate = 123456789 Before the patch: twd_timer_rate / 1000000 = 123 (twd_timer_rate / 1000000) % 100 = 23 Result: 123.23MHz. After being fixed: twd_timer_rate / 1000000 = 123 (twd_timer_rate / 10000) % 100 = 45 Result: 123.45MHz. Signed-off-by: Vitaly Kuzmichev Signed-off-by: Russell King --- arch/arm/kernel/smp_twd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 60636f499cb..2c277d40cee 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void) twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, - (twd_timer_rate / 1000000) % 100); + (twd_timer_rate / 10000) % 100); } } -- cgit v1.2.3-18-g5258 From d30e1521b2afb5e6f21ca8bc1a4b6ec2afc93597 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 6 Jul 2011 21:07:07 +0200 Subject: arm: mach-vt8500: add forgotten irq_data conversion This platform has not been converted to 'struct irq_data' when the big pile was done. It fails to compile nowadays, because the compatibility code has gone. CC arch/arm/mach-vt8500/irq.o arch/arm/mach-vt8500/irq.c:118:2: error: unknown field 'ack' specified in initializer arch/arm/mach-vt8500/irq.c:118:2: warning: initialization from incompatible pointer type arch/arm/mach-vt8500/irq.c:119:2: error: unknown field 'mask' specified in initializer arch/arm/mach-vt8500/irq.c:119:2: warning: initialization from incompatible pointer type arch/arm/mach-vt8500/irq.c:120:2: error: unknown field 'unmask' specified in initializer arch/arm/mach-vt8500/irq.c:120:2: warning: initialization from incompatible pointer type arch/arm/mach-vt8500/irq.c:121:2: error: unknown field 'set_type' specified in initializer arch/arm/mach-vt8500/irq.c:121:2: warning: initialization from incompatible pointer type make[1]: *** [arch/arm/mach-vt8500/irq.o] Error 1 Add the missing conversion. Tested on a JayPC-Tablet. 
Signed-off-by: Wolfram Sang Acked-by: Alexey Charkov Cc: Russell King Cc: Thomas Gleixner Cc: Andrew Morton Signed-off-by: Arnd Bergmann --- arch/arm/mach-vt8500/irq.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c index 245140c0df1..642de0408f2 100644 --- a/arch/arm/mach-vt8500/irq.c +++ b/arch/arm/mach-vt8500/irq.c @@ -39,9 +39,10 @@ static void __iomem *ic_regbase; static void __iomem *sic_regbase; -static void vt8500_irq_mask(unsigned int irq) +static void vt8500_irq_mask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 edge; if (irq >= 64) { @@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq) } } -static void vt8500_irq_unmask(unsigned int irq) +static void vt8500_irq_unmask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 dctr; if (irq >= 64) { @@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq) writeb(dctr, base + VT8500_IC_DCTR + irq); } -static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) +static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) { void __iomem *base = ic_regbase; - unsigned int orig_irq = irq; + unsigned irq = d->irq; + unsigned orig_irq = irq; u8 dctr; if (irq >= 64) { @@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .ack = vt8500_irq_mask, - .mask = vt8500_irq_mask, - .unmask = vt8500_irq_unmask, - .set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_mask, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; void __init vt8500_init_irq(void) -- cgit v1.2.3-18-g5258 From 659fb32d1b67476f4ade25e9ea0e2642a5b9c4b5 Mon Sep 17 00:00:00 2001 From: Simon Guinot Date: Wed, 6 Jul 2011 12:41:31 -0400 Subject: genirq: replace irq_gc_ack() with {set,clr}_bit variants (fwd) This fixes a regression introduced by e59347a "arm: orion: Use generic irq chip". Depending on the device, interrupts acknowledgement is done by setting or by clearing a dedicated register. Replace irq_gc_ack() with some {set,clr}_bit variants allows to handle both cases. Note that this patch affects the following SoCs: Davinci, Samsung and Orion. Except for this last, the change is minor: irq_gc_ack() is just renamed into irq_gc_ack_set_bit(). For the Orion SoCs, the edge GPIO interrupts support is currently broken. irq_gc_ack() try to acknowledge a such interrupt by setting the corresponding cause register bit. The Orion GPIO device expect the opposite. To fix this issue, the irq_gc_ack_clr_bit() variant is used. Tested on Network Space v2. 
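To make the two acknowledge conventions concrete, here is a generic sketch (the register pointer and bit are placeholders, not the Orion, Davinci or Samsung layout):

/* "Ack by setting": a write-one-to-clear cause register, as assumed by
 * the old irq_gc_ack() and kept by irq_gc_ack_set_bit(). */
static void demo_ack_set_bit(void __iomem *cause_reg, u32 bit)
{
	writel(bit, cause_reg);
}

/* "Ack by clearing": the pending bit must be written back as zero,
 * which is what the Orion GPIO block expects and what
 * irq_gc_ack_clr_bit() provides. */
static void demo_ack_clr_bit(void __iomem *cause_reg, u32 bit)
{
	writel(~bit, cause_reg);
}

Using the wrong variant leaves the target interrupt unacknowledged (and may clear unrelated pending bits), which is the breakage described above.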
Reported-by: Joey Oravec Signed-off-by: Simon Guinot Signed-off-by: Arnd Bergmann --- arch/arm/mach-davinci/irq.c | 2 +- arch/arm/plat-orion/gpio.c | 2 +- arch/arm/plat-s5p/irq-gpioint.c | 2 +- arch/arm/plat-samsung/irq-uart.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c index bfe68ec4e1a..d8c1af02593 100644 --- a/arch/arm/mach-davinci/irq.c +++ b/arch/arm/mach-davinci/irq.c @@ -53,7 +53,7 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 5b4fffab1eb..41ab97ebe4c 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; ct->regs.ack = GPIO_EDGE_CAUSE_OFF; ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_clr_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index 135abda31c9..327ab9f662e 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c @@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) if (!gc) return -ENOMEM; ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->chip.irq_set_type = s5p_gpioint_set_type, diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c index 32582c0958e..0e46588d847 100644 --- a/arch/arm/plat-samsung/irq-uart.c +++ b/arch/arm/plat-samsung/irq-uart.c @@ -55,7 +55,7 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, handle_level_irq); ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->regs.ack = S3C64XX_UINTP; -- cgit v1.2.3-18-g5258 From c1f2d9991046769aa14a904ea2bf5434daf86262 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 5 Jul 2011 23:59:56 +0100 Subject: ARM: ensure tag tables are const Nothing should ever modify a tag table entry, so mark these const. 
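As a brief illustration of what the annotation expresses (the parser below is a placeholder, not kernel code; only the __tagtable() usage mirrors the real macro): a tag table entry is registered once and then only read, so making it const documents that and lets the compiler reject stray writes.

/* Placeholder parser: real ones decode the tag data for their tag. */
static int __init demo_parse_tag(const struct tag *tag)
{
	return 0;
}

__tagtable(ATAG_MEM, demo_parse_tag);	/* illustrative registration only */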
Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07a..c3b5c796d81 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -187,7 +187,7 @@ struct tagtable { #define __tag __used __attribute__((__section__(".taglist.init"))) #define __tagtable(tag, fn) \ -static struct tagtable __tagtable_##fn __tag = { tag, fn } +static const struct tagtable __tagtable_##fn __tag = { tag, fn } /* * Memory map description -- cgit v1.2.3-18-g5258 From 3002b41bc6e5d66f7c1b5fd604f8f413e9bb37a5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 May 2011 11:40:54 +0100 Subject: ARM: decompressor: use better output sections Place read-only data in a .rodata output section, and the compressed piggy data in .piggydata. Place the .got.plt section before the .got section as is standard ELF practise. This allows the piggydata to be more easily extracted from the compressed vmlinux file for verification purposes. Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/boot/compressed/vmlinux.lds.in | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index ea80abe7884..4e728834a1b 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in @@ -33,20 +33,24 @@ SECTIONS *(.text.*) *(.fixup) *(.gnu.warning) + *(.glue_7t) + *(.glue_7) + } + .rodata : { *(.rodata) *(.rodata.*) - *(.glue_7) - *(.glue_7t) + } + .piggydata : { *(.piggydata) - . = ALIGN(4); } + . = ALIGN(4); _etext = .; + .got.plt : { *(.got.plt) } _got_start = .; .got : { *(.got) } _got_end = .; - .got.plt : { *(.got.plt) } _edata = .; . = BSS_START; -- cgit v1.2.3-18-g5258 From 39df88872f64b8a7c438861460063eadf2ba9011 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 May 2011 11:25:33 +0100 Subject: ARM: vmlinux.lds: move discarded sections to beginning Rather than scattering the discarded sections throughout the linker file, move them to the start. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 47 ++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 25 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index e5287f21bad..cb46a9bad4f 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -38,6 +38,28 @@ jiffies = jiffies_64 + 4; SECTIONS { + /* + * unwind exit sections must be discarded before the rest of the + * unwind sections get included. + */ + /DISCARD/ : { + *(.ARM.exidx.exit.text) + *(.ARM.extab.exit.text) + ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) + ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) +#ifndef CONFIG_HOTPLUG + *(.ARM.exidx.devexit.text) + *(.ARM.extab.devexit.text) +#endif +#ifndef CONFIG_MMU + *(.fixup) + *(__ex_table) +#endif +#ifndef CONFIG_SMP_ON_UP + *(.alt.smp.init) +#endif + } + #ifdef CONFIG_XIP_KERNEL . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); #else @@ -89,25 +111,6 @@ SECTIONS __init_end = .; #endif - /* - * unwind exit sections must be discarded before the rest of the - * unwind sections get included. 
- */ - /DISCARD/ : { - *(.ARM.exidx.exit.text) - *(.ARM.extab.exit.text) - ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) - ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) -#ifndef CONFIG_HOTPLUG - *(.ARM.exidx.devexit.text) - *(.ARM.extab.devexit.text) -#endif -#ifndef CONFIG_MMU - *(.fixup) - *(__ex_table) -#endif - } - .text : { /* Real text segment */ _text = .; /* Text and read-only data */ __exception_text_start = .; @@ -270,12 +273,6 @@ SECTIONS /* Default discards */ DISCARDS - -#ifndef CONFIG_SMP_ON_UP - /DISCARD/ : { - *(.alt.smp.init) - } -#endif } /* -- cgit v1.2.3-18-g5258 From 1604d79d372bcf0cf1aebcbdee251bd0f3d56665 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 5 Jul 2011 22:56:41 +0100 Subject: ARM: vmlinux.lds: rearrange .init output section Keep the various linker tables as separate output sections rather than combining them together into one big .init section. This makes the 'vmlinux' easier to see what is placed where. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 47 ++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 18 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index cb46a9bad4f..c8bb9b77c2e 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -65,48 +65,59 @@ SECTIONS #else . = PAGE_OFFSET + TEXT_OFFSET; #endif - - .init : { /* Init code and data */ + .head.text : { _stext = .; - _sinittext = .; - HEAD_TEXT - INIT_TEXT - ARM_EXIT_KEEP(EXIT_TEXT) - _einittext = .; + HEAD_TEXT + } + INIT_TEXT_SECTION(8) + .exit.text : { + ARM_EXIT_KEEP(EXIT_TEXT) + } + .init.proc.info : { ARM_CPU_DISCARD(PROC_INFO) + } + .init.arch.info : { __arch_info_begin = .; - *(.arch.info.init) + *(.arch.info.init) __arch_info_end = .; + } + .init.tagtable : { __tagtable_begin = .; - *(.taglist.init) + *(.taglist.init) __tagtable_end = .; + } #ifdef CONFIG_SMP_ON_UP + .init.smpalt : { __smpalt_begin = .; - *(.alt.smp.init) + *(.alt.smp.init) __smpalt_end = .; + } #endif - + .init.pv_table : { __pv_table_begin = .; - *(.pv_table) + *(.pv_table) __pv_table_end = .; - + } + .init.data : { +#ifndef CONFIG_XIP_KERNEL + INIT_DATA +#endif INIT_SETUP(16) - INIT_CALLS CON_INITCALL SECURITY_INITCALL INIT_RAM_FS - + } #ifndef CONFIG_XIP_KERNEL - __init_begin = _stext; - INIT_DATA + .exit.data : { ARM_EXIT_KEEP(EXIT_DATA) -#endif } +#endif PERCPU_SECTION(32) #ifndef CONFIG_XIP_KERNEL + __init_begin = _stext; . = ALIGN(PAGE_SIZE); __init_end = .; #endif -- cgit v1.2.3-18-g5258 From 43fc9d2fa5585adfadd0fdc06a20626727cf985a Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 Jul 2011 00:01:20 +0100 Subject: ARM: vmlinux.lds: remove .rodata/.rodata1 from main .text segment RODATA() already handles these sections, so allow it to take care of them for us. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index c8bb9b77c2e..fa812d03ff4 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -136,8 +136,6 @@ SECTIONS *(.fixup) #endif *(.gnu.warning) - *(.rodata) - *(.rodata.*) *(.glue_7) *(.glue_7t) . 
= ALIGN(4); -- cgit v1.2.3-18-g5258 From 3835d69a6c7048a28d0aea3cb8403d5e83a0f867 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 Jul 2011 10:39:34 +0100 Subject: ARM: vmlinux.lds: move init sections between text and data sections Place the init sections between the text and data sections. This means all code is grouped together at the beginning of the kernel image, and all data is at the end of the image. This avoids problems with the 24-bit branch instruction relocations becoming invalid with large initramfs images. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 96 +++++++++++++++++++++---------------------- arch/arm/mm/init.c | 4 +- 2 files changed, 50 insertions(+), 50 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index fa812d03ff4..18574b7dbc1 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -69,6 +69,53 @@ SECTIONS _stext = .; HEAD_TEXT } + .text : { /* Real text segment */ + _text = .; /* Text and read-only data */ + __exception_text_start = .; + *(.exception.text) + __exception_text_end = .; + IRQENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT +#ifdef CONFIG_MMU + *(.fixup) +#endif + *(.gnu.warning) + *(.glue_7) + *(.glue_7t) + . = ALIGN(4); + *(.got) /* Global offset table */ + ARM_CPU_KEEP(PROC_INFO) + } + + RO_DATA(PAGE_SIZE) + +#ifdef CONFIG_ARM_UNWIND + /* + * Stack unwinding tables + */ + . = ALIGN(8); + .ARM.unwind_idx : { + __start_unwind_idx = .; + *(.ARM.exidx*) + __stop_unwind_idx = .; + } + .ARM.unwind_tab : { + __start_unwind_tab = .; + *(.ARM.extab*) + __stop_unwind_tab = .; + } +#endif + + _etext = .; /* End of text and rodata section */ + +#ifndef CONFIG_XIP_KERNEL + . = ALIGN(PAGE_SIZE); + __init_begin = .; +#endif + INIT_TEXT_SECTION(8) .exit.text : { ARM_EXIT_KEEP(EXIT_TEXT) @@ -116,58 +163,11 @@ SECTIONS PERCPU_SECTION(32) -#ifndef CONFIG_XIP_KERNEL - __init_begin = _stext; - . = ALIGN(PAGE_SIZE); - __init_end = .; -#endif - - .text : { /* Real text segment */ - _text = .; /* Text and read-only data */ - __exception_text_start = .; - *(.exception.text) - __exception_text_end = .; - IRQENTRY_TEXT - TEXT_TEXT - SCHED_TEXT - LOCK_TEXT - KPROBES_TEXT -#ifdef CONFIG_MMU - *(.fixup) -#endif - *(.gnu.warning) - *(.glue_7) - *(.glue_7t) - . = ALIGN(4); - *(.got) /* Global offset table */ - ARM_CPU_KEEP(PROC_INFO) - } - - RO_DATA(PAGE_SIZE) - -#ifdef CONFIG_ARM_UNWIND - /* - * Stack unwinding tables - */ - . = ALIGN(8); - .ARM.unwind_idx : { - __start_unwind_idx = .; - *(.ARM.exidx*) - __stop_unwind_idx = .; - } - .ARM.unwind_tab : { - __start_unwind_tab = .; - *(.ARM.extab*) - __stop_unwind_tab = .; - } -#endif - - _etext = .; /* End of text and rodata section */ - #ifdef CONFIG_XIP_KERNEL __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; #else + __init_end = .; . 
= ALIGN(THREAD_SIZE); __data_loc = .; #endif diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a2..b8e89124315 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -639,8 +639,8 @@ void __init mem_init(void) " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .text : 0x%p" " - 0x%p" " (%4d kB)\n" + " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" " - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", @@ -662,8 +662,8 @@ void __init mem_init(void) #endif MLM(MODULES_VADDR, MODULES_END), - MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); -- cgit v1.2.3-18-g5258 From e2f81844efa2d44d326bef48e1c9e48926162bc6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 Jul 2011 10:53:22 +0100 Subject: ARM: vmlinux.lds: use _text and _stext the same way as x86 x86 uses _text to mark the start of the kernel image including the head text, and _stext to mark the start of the .text section. Change our vmlinux.lds to conform. An audit of the places which use _stext and _text in arch/arm indicates no users of either symbol are impacted by this change. It does mean a slight change to /proc/iomem output. Acked-by: Nicolas Pitre Tested-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/vmlinux.lds.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 18574b7dbc1..bf977f8514f 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -66,11 +66,11 @@ SECTIONS . = PAGE_OFFSET + TEXT_OFFSET; #endif .head.text : { - _stext = .; + _text = .; HEAD_TEXT } .text : { /* Real text segment */ - _text = .; /* Text and read-only data */ + _stext = .; /* Text and read-only data */ __exception_text_start = .; *(.exception.text) __exception_text_end = .; -- cgit v1.2.3-18-g5258 From 5838e9b8dada491278db48ff9162e25125fa89d6 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 8 Jul 2011 14:33:02 +0900 Subject: ARM: S3C24XX: don't use uninitialized variable in dma.c Commit 8970ef47 (S3C24XX: Remove hardware specific registers from DMA calls) removed the parameter dcon in s3c2410_dma_config() and calculates it on its own. So the debug-output for the old parameter can go, too. 
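A hedged sketch of the bug class (the structure and mask are invented; see the warning quoted next): the value was being logged before it had been computed, so those printouts have to be dropped or moved after the assignment.

struct demo_chan {			/* hypothetical, not the s3c2410 channel */
	unsigned int dcon;
};

static void demo_config(struct demo_chan *chan)
{
	unsigned int dcon;

	/* Logging dcon here would read an uninitialised local --
	 * exactly the warning quoted below. */
	dcon = chan->dcon & 0x00f00000;	/* made-up mask: compute first */
	pr_debug("dcon is %08x\n", dcon);	/* ...then log */
}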
Fixes: arch/arm/plat-s3c24xx/dma.c: In function 's3c2410_dma_config': arch/arm/plat-s3c24xx/dma.c:1030:2: warning: 'dcon' is used uninitialized in this function Signed-off-by: Wolfram Sang Cc: Ben Dooks Signed-off-by: Kukjin Kim --- arch/arm/plat-s3c24xx/dma.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 2abf9660bc6..a5b3684e115 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -1027,17 +1027,13 @@ int s3c2410_dma_config(unsigned int channel, struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int dcon; - pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n", - __func__, channel, xferunit, dcon); + pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit); if (chan == NULL) return -EINVAL; - pr_debug("%s: Initial dcon is %08x\n", __func__, dcon); - dcon = chan->dcon & dma_sel.dcon_mask; - - pr_debug("%s: New dcon is %08x\n", __func__, dcon); + pr_debug("%s: dcon is %08x\n", __func__, dcon); switch (chan->req_ch) { case DMACH_I2S_IN: -- cgit v1.2.3-18-g5258 From cb26a7b1c18857d14913040b45f3fe51b513f936 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 8 Jul 2011 14:33:02 +0900 Subject: ARM: S3C24XX: drop return codes in void function of dma.c Commit bb072c3c (ARM / Samsung: Use struct syscore_ops for "core" power management) turned s3c2410_dma_resume_chan() from int to void. So, drop the actual return values, too. Fixes: arch/arm/plat-s3c24xx/dma.c: In function 's3c2410_dma_resume_chan': arch/arm/plat-s3c24xx/dma.c:1238:3: warning: 'return' with a value, in function returning void arch/arm/plat-s3c24xx/dma.c:1250:2: warning: 'return' with a value, in function returning void Signed-off-by: Wolfram Sang Acked-by: Rafael J. Wysocki Signed-off-by: Kukjin Kim --- arch/arm/plat-s3c24xx/dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index a5b3684e115..a79a8ccd25f 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -1231,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) /* restore channel's hardware configuration */ if (!cp->in_use) - return 0; + return; printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); @@ -1242,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) if (cp->map != NULL) dma_sel.select(cp, cp->map); - - return 0; } static void s3c2410_dma_resume(void) -- cgit v1.2.3-18-g5258 From d34c1fcddc3a159d52576c656f8caabc0cf4894b Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 8 Jul 2011 18:16:56 +0900 Subject: ARM: S3C2440: fix section mismatch on mini2440 If mini2440_init() is in __init, mini2440_parse_features() should also be in __init. Fixes: (.text+0x9adc): Section mismatch in reference from the function mini2440_parse_features.clone.0() to the (unknown reference) .init.data:(unknown) The function mini2440_parse_features.clone.0() references the (unknown reference) __initdata (unknown). 
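The rule being enforced, as a short sketch (names invented, not the mini2440 code): a function that reads __init or __initdata objects must itself be __init, because that memory is freed once boot completes.

static char demo_features[] __initdata = "backlight,camera";

/* Must be __init: it dereferences demo_features, which is discarded
 * after boot; without the annotation modpost emits a warning like the
 * one above. */
static void __init demo_parse_features(const char *str)
{
	pr_info("parsing \"%s\" against \"%s\"\n", str, demo_features);
}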
Signed-off-by: Wolfram Sang Cc: Michel Pollet Signed-off-by: Kukjin Kim --- arch/arm/mach-s3c2440/mach-mini2440.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index dd3120df09f..fc2dc0b3d4f 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c @@ -552,7 +552,7 @@ struct mini2440_features_t { struct platform_device *optional[8]; }; -static void mini2440_parse_features( +static void __init mini2440_parse_features( struct mini2440_features_t * features, const char * features_str ) { -- cgit v1.2.3-18-g5258 From 54d525735140e930d87c5afd1b0b3393ffdac021 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 7 Jul 2011 18:43:36 +0100 Subject: ARM: 6996/1: mm: Poison freed init memory Poisoning __init marked memory can be useful when tracking down obscure memory corruption bugs. Therefore, poison init memory with 0xe7fddef0 to catch bugs earlier. The poison value is an undefined instruction in ARM mode and branch to an undefined instruction in Thumb mode. Signed-off-by: Stephen Boyd Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/init.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2c2cce9cd8c..fdc87f9bda5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -416,6 +416,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) return pages; } +/* + * Poison init memory with an undefined instruction (ARM) or a branch to an + * undefined instruction (Thumb). + */ +static inline void poison_init_mem(void *s, size_t count) +{ + u32 *p = (u32 *)s; + while ((count = count - 4)) + *p++ = 0xe7fddef0; +} + static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -696,11 +707,13 @@ void free_initmem(void) #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; + poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), __phys_to_pfn(__pa(&__tcm_end)), "TCM link"); #endif + poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), @@ -713,10 +726,12 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) + if (!keep_initrd) { + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); totalram_pages += free_area(__phys_to_pfn(__pa(start)), __phys_to_pfn(__pa(end)), "initrd"); + } } static int __init keepinitrd_setup(char *__unused) -- cgit v1.2.3-18-g5258 From 3973c337759cd201773a0ecc7b6f39f1ea2a6287 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 8 Jul 2011 14:02:21 +0100 Subject: ARM: dmabounce: simplify dma_set_mask() Simplify the dmabounce specific code in dma_set_mask(). We can just omit setting the dma mask if dmabounce is enabled (we will have already set dma mask via callbacks when the device is created in that case.) 
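For context, a sketch of the driver-side call this simplifies (illustrative probe function, not part of this patch):

static int demo_probe(struct platform_device *pdev)
{
	/* Ask for a 32-bit DMA mask; dma_set_mask() fails if
	 * dma_supported() says the platform cannot honour it. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}

With DMABOUNCE configured, the mask set up when the device was created is simply left untouched, as described above.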
Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4ad25337f92..94662f4c9ea 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -133,18 +133,12 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 dma_mask) { -#ifdef CONFIG_DMABOUNCE - if (dev->archdata.dmabounce) { - if (dma_mask >= ISA_DMA_THRESHOLD) - return 0; - else - return -EIO; - } -#endif if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; +#ifndef CONFIG_DMABOUNCE *dev->dma_mask = dma_mask; +#endif return 0; } -- cgit v1.2.3-18-g5258 From c7e89b16eb90e7bdf0d71bd5ba265ce8d424f30b Mon Sep 17 00:00:00 2001 From: Heechul Yun Date: Fri, 8 Jul 2011 13:32:34 +0100 Subject: ARM: 6995/2: mm: remove unnecessary cache flush on v6 copypage Originally introduced to maintain coherency between icache and dcache in v6 nonaliasing mode. This is now handled by __sync_icache_dcache since c0177800, therefore unnecessary in this function. Signed-off-by: Heechul Yun Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/copypage-v6.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c65c90..63cca009713 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); - __cpuc_flush_dcache_area(kto, PAGE_SIZE); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } -- cgit v1.2.3-18-g5258 From af61bdf035e2e4dd646b37b270bd558188a127c0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 9 Jul 2011 13:44:04 +0100 Subject: ARM: vfp: rename last_VFP_context to vfp_current_hw_state Rename the slightly confusing 'last_VFP_context' variable to be more descriptive of what it actually is. This variable stores a pointer to the current owner's vfpstate structure for the context held in the VFP hardware. 
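In other words (simplified from the vfp_state_in_hw() helper introduced later in this series), ownership on UP reduces to a single pointer comparison:

/* True if the VFP hardware on 'cpu' currently holds this thread's
 * context; SMP additionally compares the CPU number stored in the
 * saved state (see the thread-migration fix later in this series). */
static bool demo_vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}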
Signed-off-by: Russell King --- arch/arm/vfp/vfphw.S | 10 +++++----- arch/arm/vfp/vfpmodule.c | 36 +++++++++++++++++++++--------------- 2 files changed, 26 insertions(+), 20 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 9897dcfc16d..c75443e204b 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -77,9 +77,9 @@ ENTRY(vfp_support_entry) bne look_for_VFP_exceptions @ VFP is already enabled DBGSTR1 "enable %x", r10 - ldr r3, last_VFP_context_address + ldr r3, vfp_current_hw_state_address orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set - ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer + ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled cmp r4, r10 beq check_for_exception @ we are returning to the same @@ -116,7 +116,7 @@ ENTRY(vfp_support_entry) no_old_VFP_process: DBGSTR1 "load state %p", r10 - str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer + str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer @ Load the saved state back into the VFP VFPFLDMIA r10, r5 @ reload the working registers while @ FPEXC is in a safe state @@ -207,8 +207,8 @@ ENTRY(vfp_save_state) ENDPROC(vfp_save_state) .align -last_VFP_context_address: - .word last_VFP_context +vfp_current_hw_state_address: + .word vfp_current_hw_state .macro tbl_branch, base, tmp, shift #ifdef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f25e7ec8941..3640351171b 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -33,7 +33,13 @@ void vfp_support_entry(void); void vfp_null_entry(void); void (*vfp_vector)(void) = vfp_null_entry; -union vfp_state *last_VFP_context[NR_CPUS]; + +/* + * The pointer to the vfpstate structure of the thread which currently + * owns the context held in the VFP hardware, or NULL if the hardware + * context is invalid. + */ +union vfp_state *vfp_current_hw_state[NR_CPUS]; /* * Dual-use variable. @@ -57,12 +63,12 @@ static void vfp_thread_flush(struct thread_info *thread) /* * Disable VFP to ensure we initialize it first. We must ensure - * that the modification of last_VFP_context[] and hardware disable + * that the modification of vfp_current_hw_state[] and hardware disable * are done for the same CPU and without preemption. */ cpu = get_cpu(); - if (last_VFP_context[cpu] == vfp) - last_VFP_context[cpu] = NULL; + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); put_cpu(); } @@ -73,8 +79,8 @@ static void vfp_thread_exit(struct thread_info *thread) union vfp_state *vfp = &thread->vfpstate; unsigned int cpu = get_cpu(); - if (last_VFP_context[cpu] == vfp) - last_VFP_context[cpu] = NULL; + if (vfp_current_hw_state[cpu] == vfp) + vfp_current_hw_state[cpu] = NULL; put_cpu(); } @@ -129,9 +135,9 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * case the thread migrates to a different CPU. The * restoring is done lazily. */ - if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { - vfp_save_state(last_VFP_context[cpu], fpexc); - last_VFP_context[cpu]->hard.cpu = cpu; + if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) { + vfp_save_state(vfp_current_hw_state[cpu], fpexc); + vfp_current_hw_state[cpu]->hard.cpu = cpu; } /* * Thread migration, just force the reloading of the @@ -139,7 +145,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * contain stale data. 
*/ if (thread->vfpstate.hard.cpu != cpu) - last_VFP_context[cpu] = NULL; + vfp_current_hw_state[cpu] = NULL; #endif /* @@ -415,7 +421,7 @@ static int vfp_pm_suspend(void) } /* clear any information we had about last context state */ - memset(last_VFP_context, 0, sizeof(last_VFP_context)); + memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); return 0; } @@ -451,7 +457,7 @@ void vfp_sync_hwstate(struct thread_info *thread) * If the thread we're interested in is the current owner of the * hardware VFP state, then we need to save its state. */ - if (last_VFP_context[cpu] == &thread->vfpstate) { + if (vfp_current_hw_state[cpu] == &thread->vfpstate) { u32 fpexc = fmrx(FPEXC); /* @@ -473,7 +479,7 @@ void vfp_flush_hwstate(struct thread_info *thread) * If the thread we're interested in is the current owner of the * hardware VFP state, then we need to save its state. */ - if (last_VFP_context[cpu] == &thread->vfpstate) { + if (vfp_current_hw_state[cpu] == &thread->vfpstate) { u32 fpexc = fmrx(FPEXC); fmxr(FPEXC, fpexc & ~FPEXC_EN); @@ -482,7 +488,7 @@ void vfp_flush_hwstate(struct thread_info *thread) * Set the context to NULL to force a reload the next time * the thread uses the VFP. */ - last_VFP_context[cpu] = NULL; + vfp_current_hw_state[cpu] = NULL; } #ifdef CONFIG_SMP @@ -514,7 +520,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action, { if (action == CPU_DYING || action == CPU_DYING_FROZEN) { unsigned int cpu = (long)hcpu; - last_VFP_context[cpu] = NULL; + vfp_current_hw_state[cpu] = NULL; } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) vfp_enable(NULL); return NOTIFY_OK; -- cgit v1.2.3-18-g5258 From 08409c33d6fdb43fa19d7dbdafd4f280b7835592 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 9 Jul 2011 14:24:36 +0100 Subject: ARM: vfp: rename check_exception to vfp_hw_state_valid Rename this branch to more accurately reflect why its taken, rather than what the following code does. It is the only caller of this code. This helps to clarify following changes, yet this change results in no actual code change. Document the VFP hardware state at the target of this branch. Signed-off-by: Russell King --- arch/arm/vfp/vfphw.S | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index c75443e204b..404538ae591 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -81,11 +81,8 @@ ENTRY(vfp_support_entry) orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled - cmp r4, r10 - beq check_for_exception @ we are returning to the same - @ process, so the registers are - @ still there. In this case, we do - @ not want to drop a pending exception. + cmp r4, r10 @ this thread owns the hw context? 
+ beq vfp_hw_state_valid VFPFMXR FPEXC, r5 @ enable VFP, disable any pending @ exceptions, so we can get at the @@ -132,7 +129,8 @@ no_old_VFP_process: #endif VFPFMXR FPSCR, r5 @ restore status -check_for_exception: +@ The context stored in the VFP hardware is up to date with this thread +vfp_hw_state_valid: tst r1, #FPEXC_EX bne process_exception @ might as well handle the pending @ exception before retrying branch -- cgit v1.2.3-18-g5258 From f8f2a8522a88aacd62a310ce49e8dac530d1b403 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 9 Jul 2011 16:09:43 +0100 Subject: ARM: vfp: fix a hole in VFP thread migration Fix a hole in the VFP thread migration. Lets define two threads. Thread 1, we'll call 'interesting_thread' which is a thread which is running on CPU0, using VFP (so vfp_current_hw_state[0] = &interesting_thread->vfpstate) and gets migrated off to CPU1, where it continues execution of VFP instructions. Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes over on CPU0. This has also been using VFP, and last used VFP on CPU0, but doesn't use it again. The following code will be executed twice: cpu = thread->cpu; /* * On SMP, if VFP is enabled, save the old state in * case the thread migrates to a different CPU. The * restoring is done lazily. */ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) { vfp_save_state(vfp_current_hw_state[cpu], fpexc); vfp_current_hw_state[cpu]->hard.cpu = cpu; } /* * Thread migration, just force the reloading of the * state on the new CPU in case the VFP registers * contain stale data. */ if (thread->vfpstate.hard.cpu != cpu) vfp_current_hw_state[cpu] = NULL; The first execution will be on CPU0 to switch away from 'interesting_thread'. interesting_thread->cpu will be 0. So, vfp_current_hw_state[0] points at interesting_thread->vfpstate. The hardware state will be saved, along with the CPU number (0) that it was executing on. 'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0. Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0, and so the thread migration check is not triggered. This means that vfp_current_hw_state[0] remains pointing at interesting_thread. The second execution will be on CPU1 to switch _to_ 'interesting_thread'. So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now will be 1. The previous thread executing on CPU1 is not relevant to this so we shall ignore that. We get to the thread migration check. Here, we discover that interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is now 1, indicating thread migration. We set vfp_current_hw_state[1] to NULL. So, at this point vfp_current_hw_state[] contains the following: [0] = &interesting_thread->vfpstate [1] = NULL Our interesting thread now executes a VFP instruction, takes a fault which loads the state into the VFP hardware. Now, through the assembly we now have: [0] = &interesting_thread->vfpstate [1] = &interesting_thread->vfpstate CPU1 stops due to ptrace (and so saves its VFP state) using the thread switch code above), and CPU0 calls vfp_sync_hwstate(). if (vfp_current_hw_state[cpu] == &thread->vfpstate) { vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); BANG, we corrupt interesting_thread's VFP state by overwriting the more up-to-date state saved by CPU1 with the old VFP state from CPU0. Fix this by ensuring that we have sane semantics for the various state describing variables: 1. 
vfp_current_hw_state[] points to the current owner of the context information stored in each CPUs hardware, or NULL if that state information is invalid. 2. thread->vfpstate.hard.cpu always contains the most recent CPU number which the state was loaded into or NR_CPUS if no CPU owns the state. So, for a particular CPU to be a valid owner of the VFP state for a particular thread t, two things must be true: vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu. and that is valid from the moment a CPU loads the saved VFP context into the hardware. This gives clear and consistent semantics to interpreting these variables. This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu is invalidated, otherwise CPU0 may believe it was the last owner. The hole can happen thus: - thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info freed. - New thread allocated from a previously running thread on CPU2, reusing memory for thread1 and copying vfp.hard.cpu. At this point, the following are true: new_thread1->vfpstate.hard.cpu == 2 &new_thread1->vfpstate == vfp_current_hw_state[2] Lastly, this also addresses thread flushing in a similar way to thread copying. Hole is: - thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP. - thread calls execve(), so thread flush happens, leaving vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing thread->vfpstate.hard.cpu = 0. - thread migrates back to CPU0 before using VFP. At this point, the following are true: thread->vfpstate.hard.cpu == 0 &thread->vfpstate == vfp_current_hw_state[0] Signed-off-by: Russell King --- arch/arm/kernel/asm-offsets.c | 3 ++ arch/arm/vfp/vfphw.S | 43 +++++++++++++++---- arch/arm/vfp/vfpmodule.c | 98 +++++++++++++++++++++++-------------------- 3 files changed, 89 insertions(+), 55 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 927522cfc12..16baba2e436 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -59,6 +59,9 @@ int main(void) DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); +#ifdef CONFIG_SMP + DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); +#endif #ifdef CONFIG_ARM_THUMBEE DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); #endif diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 404538ae591..2d30c7f6edd 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -82,19 +82,22 @@ ENTRY(vfp_support_entry) ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled cmp r4, r10 @ this thread owns the hw context? +#ifndef CONFIG_SMP + @ For UP, checking that this thread owns the hw context is + @ sufficient to determine that the hardware state is valid. beq vfp_hw_state_valid + @ On UP, we lazily save the VFP context. As a different + @ thread wants ownership of the VFP hardware, save the old + @ state if there was a previous (valid) owner. 
+ VFPFMXR FPEXC, r5 @ enable VFP, disable any pending @ exceptions, so we can get at the @ rest of it -#ifndef CONFIG_SMP - @ Save out the current registers to the old thread state - @ No need for SMP since this is not done lazily - DBGSTR1 "save old state %p", r4 - cmp r4, #0 - beq no_old_VFP_process + cmp r4, #0 @ if the vfp_current_hw_state is NULL + beq vfp_reload_hw @ then the hw state needs reloading VFPFSTMIA r4, r5 @ save the working registers VFPFMRX r5, FPSCR @ current status #ifndef CONFIG_CPU_FEROCEON @@ -107,11 +110,33 @@ ENTRY(vfp_support_entry) 1: #endif stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 - @ and point r4 at the word at the - @ start of the register dump +vfp_reload_hw: + +#else + @ For SMP, if this thread does not own the hw context, then we + @ need to reload it. No need to save the old state as on SMP, + @ we always save the state when we switch away from a thread. + bne vfp_reload_hw + + @ This thread has ownership of the current hardware context. + @ However, it may have been migrated to another CPU, in which + @ case the saved state is newer than the hardware context. + @ Check this by looking at the CPU number which the state was + @ last loaded onto. + ldr ip, [r10, #VFP_CPU] + teq ip, r11 + beq vfp_hw_state_valid + +vfp_reload_hw: + @ We're loading this threads state into the VFP hardware. Update + @ the CPU number which contains the most up to date VFP context. + str r11, [r10, #VFP_CPU] + + VFPFMXR FPEXC, r5 @ enable VFP, disable any pending + @ exceptions, so we can get at the + @ rest of it #endif -no_old_VFP_process: DBGSTR1 "load state %p", r10 str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer @ Load the saved state back into the VFP diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 3640351171b..08ff93fa533 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -34,19 +34,52 @@ void vfp_null_entry(void); void (*vfp_vector)(void) = vfp_null_entry; +/* + * Dual-use variable. + * Used in startup: set to non-zero if VFP checks fail + * After startup, holds VFP architecture + */ +unsigned int VFP_arch; + /* * The pointer to the vfpstate structure of the thread which currently * owns the context held in the VFP hardware, or NULL if the hardware * context is invalid. + * + * For UP, this is sufficient to tell which thread owns the VFP context. + * However, for SMP, we also need to check the CPU number stored in the + * saved state too to catch migrations. */ union vfp_state *vfp_current_hw_state[NR_CPUS]; /* - * Dual-use variable. - * Used in startup: set to non-zero if VFP checks fail - * After startup, holds VFP architecture + * Is 'thread's most up to date state stored in this CPUs hardware? + * Must be called from non-preemptible context. */ -unsigned int VFP_arch; +static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) +{ +#ifdef CONFIG_SMP + if (thread->vfpstate.hard.cpu != cpu) + return false; +#endif + return vfp_current_hw_state[cpu] == &thread->vfpstate; +} + +/* + * Force a reload of the VFP context from the thread structure. We do + * this by ensuring that access to the VFP hardware is disabled, and + * clear last_VFP_context. Must be called from non-preemptible context. 
+ */ +static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) +{ + if (vfp_state_in_hw(cpu, thread)) { + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + vfp_current_hw_state[cpu] = NULL; + } +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif +} /* * Per-thread VFP initialization. @@ -60,6 +93,9 @@ static void vfp_thread_flush(struct thread_info *thread) vfp->hard.fpexc = FPEXC_EN; vfp->hard.fpscr = FPSCR_ROUND_NEAREST; +#ifdef CONFIG_SMP + vfp->hard.cpu = NR_CPUS; +#endif /* * Disable VFP to ensure we initialize it first. We must ensure @@ -90,6 +126,9 @@ static void vfp_thread_copy(struct thread_info *thread) vfp_sync_hwstate(parent); thread->vfpstate = parent->vfpstate; +#ifdef CONFIG_SMP + thread->vfpstate.hard.cpu = NR_CPUS; +#endif } /* @@ -135,17 +174,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * case the thread migrates to a different CPU. The * restoring is done lazily. */ - if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) { + if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) vfp_save_state(vfp_current_hw_state[cpu], fpexc); - vfp_current_hw_state[cpu]->hard.cpu = cpu; - } - /* - * Thread migration, just force the reloading of the - * state on the new CPU in case the VFP registers - * contain stale data. - */ - if (thread->vfpstate.hard.cpu != cpu) - vfp_current_hw_state[cpu] = NULL; #endif /* @@ -449,15 +479,15 @@ static void vfp_pm_init(void) static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ +/* + * Ensure that the VFP state stored in 'thread->vfpstate' is up to date + * with the hardware state. + */ void vfp_sync_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); - /* - * If the thread we're interested in is the current owner of the - * hardware VFP state, then we need to save its state. - */ - if (vfp_current_hw_state[cpu] == &thread->vfpstate) { + if (vfp_state_in_hw(cpu, thread)) { u32 fpexc = fmrx(FPEXC); /* @@ -471,36 +501,13 @@ void vfp_sync_hwstate(struct thread_info *thread) put_cpu(); } +/* Ensure that the thread reloads the hardware VFP state on the next use. */ void vfp_flush_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); - /* - * If the thread we're interested in is the current owner of the - * hardware VFP state, then we need to save its state. - */ - if (vfp_current_hw_state[cpu] == &thread->vfpstate) { - u32 fpexc = fmrx(FPEXC); + vfp_force_reload(cpu, thread); - fmxr(FPEXC, fpexc & ~FPEXC_EN); - - /* - * Set the context to NULL to force a reload the next time - * the thread uses the VFP. - */ - vfp_current_hw_state[cpu] = NULL; - } - -#ifdef CONFIG_SMP - /* - * For SMP we still have to take care of the case where the thread - * migrates to another CPU and then back to the original CPU on which - * the last VFP user is still the same thread. Mark the thread VFP - * state as belonging to a non-existent CPU so that the saved one will - * be reloaded in the above case. 
- */ - thread->vfpstate.hard.cpu = NR_CPUS; -#endif put_cpu(); } @@ -519,8 +526,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action, void *hcpu) { if (action == CPU_DYING || action == CPU_DYING_FROZEN) { - unsigned int cpu = (long)hcpu; - vfp_current_hw_state[cpu] = NULL; + vfp_force_reload((long)hcpu, current_thread_info()); } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) vfp_enable(NULL); return NOTIFY_OK; -- cgit v1.2.3-18-g5258 From 19dad35fe0f10df8f0524f98037469e3a0dd1ec2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 9 Jul 2011 17:41:33 +0100 Subject: ARM: vfp: ensure that thread flushing works if preempted Prevent a preemption event causing the initialized VFP state being overwritten by ensuring that the VFP hardware access is disabled prior to starting initialization. We can then do this in safety while still allowing preemption to occur. Signed-off-by: Russell King --- arch/arm/vfp/vfpmodule.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 08ff93fa533..0a96f71f0ab 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -89,24 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread) union vfp_state *vfp = &thread->vfpstate; unsigned int cpu; - memset(vfp, 0, sizeof(union vfp_state)); - - vfp->hard.fpexc = FPEXC_EN; - vfp->hard.fpscr = FPSCR_ROUND_NEAREST; -#ifdef CONFIG_SMP - vfp->hard.cpu = NR_CPUS; -#endif - /* * Disable VFP to ensure we initialize it first. We must ensure - * that the modification of vfp_current_hw_state[] and hardware disable - * are done for the same CPU and without preemption. + * that the modification of vfp_current_hw_state[] and hardware + * disable are done for the same CPU and without preemption. + * + * Do this first to ensure that preemption won't overwrite our + * state saving should access to the VFP be enabled at this point. */ cpu = get_cpu(); if (vfp_current_hw_state[cpu] == vfp) vfp_current_hw_state[cpu] = NULL; fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); put_cpu(); + + memset(vfp, 0, sizeof(union vfp_state)); + + vfp->hard.fpexc = FPEXC_EN; + vfp->hard.fpscr = FPSCR_ROUND_NEAREST; +#ifdef CONFIG_SMP + vfp->hard.cpu = NR_CPUS; +#endif } static void vfp_thread_exit(struct thread_info *thread) -- cgit v1.2.3-18-g5258 From a065685d2f0b4bb69d0f64d2cd396a5c463db2a9 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 13 Jun 2011 10:42:19 +0200 Subject: ARM: pxa: fix gpio_to_chip() clash with gpiolib namespace The PXA platform code has a static inline helper called gpio_to_chip which clashes with the gpiolib namespace if we try to expose the function with the same name from gpiolib, and it's still confusing even if we don't do that. So rename it to gpio_to_pxachip(). 
Reported-by: H Hartley Sweeten Cc: Eric Miao Signed-off-by: Linus Walleij Signed-off-by: Eric Miao --- arch/arm/plat-pxa/gpio.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index 48ebb9479b6..a11dc367050 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c @@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c) return container_of(c, struct pxa_gpio_chip, chip)->regbase; } -static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio) +static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio) { return &pxa_gpio_chips[gpio_to_bank(gpio)]; } @@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) int gpio = irq_to_gpio(d->irq); unsigned long gpdr, mask = GPIO_bit(gpio); - c = gpio_to_chip(gpio); + c = gpio_to_pxachip(gpio); if (type == IRQ_TYPE_PROBE) { /* Don't mess with enabled GPIOs using preconfigured edges or @@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) static void pxa_ack_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); } @@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d) static void pxa_mask_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); uint32_t grer, gfer; c->irq_mask &= ~GPIO_bit(gpio); @@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d) static void pxa_unmask_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); c->irq_mask |= GPIO_bit(gpio); update_edge_detect(c); -- cgit v1.2.3-18-g5258 From 8c568df9d7e9b40063f0a5d4235e3d28df88f92a Mon Sep 17 00:00:00 2001 From: Sven Neumann Date: Wed, 25 May 2011 13:37:31 +0200 Subject: ARM: pxa/raumfeld: adapt to upcoming hardware change The backlight control is going to change back to PWM in the upcoming Raumfeld Controller hardware revision. Signed-off-by: Sven Neumann Acked-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/raumfeld.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index d130f77b6d1..8e5b3d8a86b 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -598,14 +598,15 @@ static void __init raumfeld_lcd_init(void) pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); - /* Earlier devices had the backlight regulator controlled - * via PWM, later versions use another controller for that */ - if ((system_rev & 0xff) < 2) { + /* Hardware revision 2 has the backlight regulator controlled + * by an LT3593, earlier and later devices use PWM for that. 
*/ + if ((system_rev & 0xff) == 2) { + platform_device_register(&raumfeld_lt3593_device); + } else { mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); platform_device_register(&raumfeld_pwm_backlight_device); - } else - platform_device_register(&raumfeld_lt3593_device); + } ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); if (ret < 0) -- cgit v1.2.3-18-g5258 From a74fe1194b8da1dcee7529ba14999381bb9c25d9 Mon Sep 17 00:00:00 2001 From: Sven Neumann Date: Wed, 25 May 2011 13:37:32 +0200 Subject: ARM: pxa/raumfeld: display initialisation fixes The display requires some milliseconds between GPIO_TFT_VA_EN and GPIO_DISPLAY_ENABLE. Reorder initialisation to comply with the display spec. Also tune timings for better compliance with the spec. Signed-off-by: Sven Neumann Acked-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/raumfeld.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 8e5b3d8a86b..9caa5e5c8d9 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = { .xres = 480, .yres = 272, .bpp = 16, - .hsync_len = 4, + .hsync_len = 41, .left_margin = 2, .right_margin = 1, - .vsync_len = 1, + .vsync_len = 10, .upper_margin = 3, .lower_margin = 1, .sync = 0, @@ -596,30 +596,31 @@ static void __init raumfeld_lcd_init(void) { int ret; - pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); - - /* Hardware revision 2 has the backlight regulator controlled - * by an LT3593, earlier and later devices use PWM for that. */ - if ((system_rev & 0xff) == 2) { - platform_device_register(&raumfeld_lt3593_device); - } else { - mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; - pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); - platform_device_register(&raumfeld_pwm_backlight_device); - } - ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); if (ret < 0) pr_warning("Unable to request GPIO_TFT_VA_EN\n"); else gpio_direction_output(GPIO_TFT_VA_EN, 1); + msleep(100); + ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); if (ret < 0) pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n"); else gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); + /* Hardware revision 2 has the backlight regulator controlled + * by an LT3593, earlier and later devices use PWM for that. */ + if ((system_rev & 0xff) == 2) { + platform_device_register(&raumfeld_lt3593_device); + } else { + mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; + pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); + platform_device_register(&raumfeld_pwm_backlight_device); + } + + pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); platform_device_register(&pxa3xx_device_gcu); } -- cgit v1.2.3-18-g5258 From 9c0de4947a44166b16f189aa56f34791e9d7f552 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 25 May 2011 13:37:33 +0200 Subject: ARM: pxa/raumfeld: fix device name for codec ak4104 In commit f0fba2ad (ASoC: multi-component - ASoC Multi-Component Support), the name of the ak4104 codec driver was changed without amending the platform code which uses it as well. 
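To show why the string matters (illustrative board-info sketch, not the Raumfeld file itself): the SPI core binds a registered device to a driver by comparing the modalias against the driver name, so both sides must use the same string.

static struct spi_board_info demo_spi_board_info[] __initdata = {
	{
		.modalias	= "ak4104-codec",	/* must match the codec driver */
		.max_speed_hz	= 10000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

/* Registered from board init, e.g.
 * spi_register_board_info(demo_spi_board_info,
 *			    ARRAY_SIZE(demo_spi_board_info));
 */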
Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/raumfeld.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 9caa5e5c8d9..2f37d43f51b 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -659,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = { #define SPI_AK4104 \ { \ - .modalias = "ak4104", \ - .max_speed_hz = 10000, \ - .bus_num = 0, \ - .chip_select = 0, \ + .modalias = "ak4104-codec", \ + .max_speed_hz = 10000, \ + .bus_num = 0, \ + .chip_select = 0, \ .controller_data = (void *) GPIO_SPDIF_CS, \ } -- cgit v1.2.3-18-g5258 From f299bb9527abfa6ee45a5e26288b5e3a619f01d6 Mon Sep 17 00:00:00 2001 From: Ido Yariv Date: Tue, 12 Jul 2011 00:03:11 +0300 Subject: arm: davinci: Fix low level gpio irq handlers' argument Commit 7416401 ("arm: davinci: Fix fallout from generic irq chip conversion") introduced a bug, causing low level interrupt handlers to get a bogus irq number as an argument. The gpio irq handler falsely assumes that the handler data is the irq base number and that is no longer true. Set the irq handler data to be a pointer to the corresponding gpio controller. The chained irq handler can then use it to extract both the irq base number and the gpio registers structure. Signed-off-by: Ido Yariv CC: Thomas Gleixner [nsekhar@ti.com: renamed "ctl" to "d", simplified indexing logic for chips and took care of odd bank handling in irq handler] Signed-off-by: Sekhar Nori --- arch/arm/mach-davinci/gpio.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c index e7221398e5a..cafbe13a82a 100644 --- a/arch/arm/mach-davinci/gpio.c +++ b/arch/arm/mach-davinci/gpio.c @@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) { struct davinci_gpio_regs __iomem *g; u32 mask = 0xffff; + struct davinci_gpio_controller *d; - g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc); + d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc); + g = (struct davinci_gpio_regs __iomem *)d->regs; /* we only care about one bank */ if (irq & 1) @@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) if (!status) break; __raw_writel(status, &g->intstat); - if (irq & 1) - status >>= 16; /* now demux them to the right lowlevel handler */ - n = (int)irq_get_handler_data(irq); + n = d->irq_base; + if (irq & 1) { + n += 16; + status >>= 16; + } + while (status) { res = ffs(status); n += res; @@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void) /* set up all irqs in this bank */ irq_set_chained_handler(bank_irq, gpio_irq_handler); - irq_set_handler_data(bank_irq, (__force void *)g); + + /* + * Each chip handles 32 gpios, and each irq bank consists of 16 + * gpio irqs. Pass the irq bank's corresponding controller to + * the chained irq handler. + */ + irq_set_handler_data(bank_irq, &chips[gpio / 32]); for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { irq_set_chip(irq, &gpio_irqchip); -- cgit v1.2.3-18-g5258 From 022ae537b23cb14a391565e9ad9e9945f4b17138 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 8 Jul 2011 21:26:59 +0100 Subject: ARM: dma: replace ISA_DMA_THRESHOLD with a variable ISA_DMA_THRESHOLD has been unused by non-arch code, so lets now get rid of it from ARM by replacing it with arm_dma_zone_mask. 
Move dma_supported() and dma_set_mask() out of line, and have dma_supported() check this new variable instead. Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 29 ++--------------------------- arch/arm/include/asm/memory.h | 12 ------------ arch/arm/mm/dma-mapping.c | 35 ++++++++++++++++++++++++++++++++--- arch/arm/mm/init.c | 10 ++++++++++ arch/arm/mm/mm.h | 6 ++++++ 5 files changed, 50 insertions(+), 42 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 94662f4c9ea..7a21d0bf713 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -115,33 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, ___dma_page_dev_to_cpu(page, off, size, dir); } -/* - * Return whether the given device DMA address mask can be supported - * properly. For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask - * to this function. - * - * FIXME: This should really be a platform specific issue - we should - * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. - */ -static inline int dma_supported(struct device *dev, u64 mask) -{ - if (mask < ISA_DMA_THRESHOLD) - return 0; - return 1; -} - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - -#ifndef CONFIG_DMABOUNCE - *dev->dma_mask = dma_mask; -#endif - - return 0; -} +extern int dma_supported(struct device *, u64); +extern int dma_set_mask(struct device *, u64); /* * DMA errors are defined by all-bits-set in the DMA address. diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index af44a8fb348..b8de516e600 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define PHYS_OFFSET PLAT_PHYS_OFFSET #endif -/* - * The DMA mask corresponding to the maximum bus address allocatable - * using GFP_DMA. The default here places no restriction on DMA - * allocations. This must be the smallest DMA mask in the system, - * so a successful GFP_DMA allocation will always satisfy this. - */ -#ifndef ARM_DMA_ZONE_SIZE -#define ISA_DMA_THRESHOLD (0xffffffffULL) -#else -#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1) -#endif - /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09..0a0a1e7c20d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -25,9 +25,11 @@ #include #include +#include "mm.h" + static u64 get_coherent_dma_mask(struct device *dev) { - u64 mask = ISA_DMA_THRESHOLD; + u64 mask = (u64)arm_dma_limit; if (dev) { mask = dev->coherent_dma_mask; @@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - if ((~mask) & ISA_DMA_THRESHOLD) { + if ((~mask) & (u64)arm_dma_limit) { dev_warn(dev, "coherent DMA mask %#llx is smaller " "than system GFP_DMA mask %#llx\n", - mask, (unsigned long long)ISA_DMA_THRESHOLD); + mask, (u64)arm_dma_limit); return 0; } } @@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } EXPORT_SYMBOL(dma_sync_sg_for_device); +/* + * Return whether the given device DMA address mask can be supported + * properly. 
For example, if your device can only drive the low 24-bits + * during bus mastering, then you would pass 0x00ffffff as the mask + * to this function. + */ +int dma_supported(struct device *dev, u64 mask) +{ + if (mask < (u64)arm_dma_limit) + return 0; + return 1; +} +EXPORT_SYMBOL(dma_supported); + +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + +#ifndef CONFIG_DMABOUNCE + *dev->dma_mask = dma_mask; +#endif + + return 0; +} +EXPORT_SYMBOL(dma_set_mask); + #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a2..17d6cd0c57e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } #ifdef CONFIG_ZONE_DMA +/* + * The DMA mask corresponding to the maximum bus address allocatable + * using GFP_DMA. The default here places no restriction on DMA + * allocations. This must be the smallest DMA mask in the system, + * so a successful GFP_DMA allocation will always satisfy this. + */ +u32 arm_dma_limit; + static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) { @@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, */ arm_adjust_dma_zone(zone_size, zhole_size, ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); + + arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; #endif free_area_init_node(0, zone_size, min, zhole_size); diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d54365..010566799c8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif +#ifdef CONFIG_ZONE_DMA +extern u32 arm_dma_limit; +#else +#define arm_dma_limit ((u32)~0) +#endif + void __init bootmem_init(void); void arm_mm_memblock_reserve(void); -- cgit v1.2.3-18-g5258 From 900b170af40b4fef54e89f3d48d055f7d8031ae6 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Fri, 15 Jul 2011 21:33:12 +0200 Subject: ARM: fix regression in IXP4xx clocksource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 234b6ceddb4fc2a4bc5b9a7670f070f6e69e0868 clocksource: convert ARM 32-bit up counting clocksources broke the build for ixp4xx and made big endian operation impossible. This commit restores the original behaviour. 
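To see why the generic little-endian accessor was the wrong tool here, a rough userspace model (not kernel code, register value invented): readl()-style accessors assume a little-endian register and byte-swap on a big-endian CPU, while the IXP4xx timer register has to be read in native byte order, which is what the direct dereference in the patch restores.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 32-bit free-running timer register, native byte order. */
static volatile uint32_t fake_osts = 0x00012345;

/* Native read: the moral equivalent of 'return *IXP4XX_OSTS;' in the patch. */
static uint32_t read_native(const volatile uint32_t *reg)
{
        return *reg;
}

/* What a little-endian accessor amounts to on a big-endian CPU: the bytes
 * come back swapped and the counter value is scrambled. */
static uint32_t read_swapped(const volatile uint32_t *reg)
{
        uint32_t v = *reg;

        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

int main(void)
{
        printf("native:  0x%08x\n", (unsigned int)read_native(&fake_osts));
        printf("swapped: 0x%08x\n", (unsigned int)read_swapped(&fake_osts));
        return 0;
}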
Signed-off-by: Richard Cochran Signed-off-by: Krzysztof Hałasa [ Thomas says that we might want to have generic BE accessor functions to the MMIO clock source, but that hasn't happened yet, so in the meantime this seems to be the short-term fix for the particular problem - Linus ] Signed-off-by: Linus Torvalds --- arch/arm/mach-ixp4xx/common.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 74ed81a3cb1..07772575d7a 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void) /* * clocksource */ + +static cycle_t ixp4xx_clocksource_read(struct clocksource *c) +{ + return *IXP4XX_OSTS; +} + unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; EXPORT_SYMBOL(ixp4xx_timer_freq); static void __init ixp4xx_clocksource_init(void) { init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); - clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32, - clocksource_mmio_readl_up); + clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32, + ixp4xx_clocksource_read); } /* -- cgit v1.2.3-18-g5258 From 691abd0abf8fd496c96fdb5b4fb64721f02aa513 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Sat, 16 Jul 2011 11:13:47 +0900 Subject: ARM: SAMSUNG: Check NULL return from irq_alloc_generic_chip Signed-off-by: Todd Poynor Signed-off-by: Kukjin Kim --- arch/arm/plat-samsung/irq-uart.c | 7 +++++++ arch/arm/plat-samsung/irq-vic-timer.c | 7 +++++++ 2 files changed, 14 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c index 32582c0958e..38c53646bd6 100644 --- a/arch/arm/plat-samsung/irq-uart.c +++ b/arch/arm/plat-samsung/irq-uart.c @@ -54,6 +54,13 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, handle_level_irq); + + if (!gc) { + pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", + __func__, uirq->base_irq); + return; + } + ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack; ct->chip.irq_mask = irq_gc_mask_set_bit; diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c index a607546ddbd..f714d060370 100644 --- a/arch/arm/plat-samsung/irq-vic-timer.c +++ b/arch/arm/plat-samsung/irq-vic-timer.c @@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, S3C64XX_TINT_CSTAT, handle_level_irq); + + if (!s3c_tgc) { + pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n", + __func__, timer_irq); + return; + } + ct = s3c_tgc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; -- cgit v1.2.3-18-g5258 From d670ac019f60e4932ba329bb0800bf2929e6d77c Mon Sep 17 00:00:00 2001 From: Sangwook Lee Date: Sat, 16 Jul 2011 15:50:19 +0900 Subject: ARM: SAMSUNG: DMA Cleanup as per sparse Function declaration differs between file: dma.c and file:dma.h and SPARSE (Documentation/sparse.txt) gives error messages All dma channels are members of 'enum dma_ch' and not 'unsigned int' Please have a look at channel definitions in: arch/arm/mach-s3c64xx/include/mach/dma.h arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h arch/arm/mach-s3c2410/include/mach/dma.h So all arguments should be of type 'enum dma_ch' Signed-off-by: Sangwook Lee Signed-off-by: Kukjin Kim --- 
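The mismatch sparse objects to can be reproduced in miniature; the names below are made up and only the types matter. The header promises an enum-typed channel argument while the old definition takes a plain unsigned int, so declaration and definition are not the same signature as far as sparse is concerned; the patch simply makes both sides agree on enum dma_ch.

/* in the header (what every caller sees): */
enum example_dma_ch { EX_DMACH_UART0, EX_DMACH_UART1, EX_DMACH_MAX };

extern int example_dma_config(enum example_dma_ch channel, int xferunit);

/* in the .c file, pre-patch style: sparse reports that this definition does
 * not match the declaration above. Post-patch, the parameter here is
 * declared as enum example_dma_ch as well. */
int example_dma_config(unsigned int channel, int xferunit)
{
        return (channel < EX_DMACH_MAX && xferunit > 0) ? 0 : -1;
}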
arch/arm/mach-s3c64xx/dma.c | 14 +++++++------- arch/arm/plat-s3c24xx/dma.c | 12 ++++++------ arch/arm/plat-samsung/dma.c | 6 +++--- arch/arm/plat-samsung/include/plat/dma.h | 21 +++++++++++---------- 4 files changed, 27 insertions(+), 26 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index b197171e7d0..204bfafe4bf 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c @@ -113,7 +113,7 @@ found: return chan; } -int s3c2410_dma_config(unsigned int channel, int xferunit) +int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan) return 0; } -int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) +int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * */ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -415,7 +415,7 @@ err_buff: EXPORT_SYMBOL(s3c2410_dma_enqueue); -int s3c2410_dma_devconfig(unsigned int channel, +int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel, EXPORT_SYMBOL(s3c2410_dma_devconfig); -int s3c2410_dma_getposition(unsigned int channel, +int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition); * get control of an dma channel */ -int s3c2410_dma_request(unsigned int channel, +int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { @@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed to go through. */ -int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index a79a8ccd25f..539bd0e3def 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); * get control of an dma channel */ -int s3c2410_dma_request(unsigned int channel, +int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { @@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed to go through. 
*/ -int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; @@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) } int -s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) +s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1021,7 +1021,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * xfersize: size of unit in bytes (1,2,4) */ -int s3c2410_dma_config(unsigned int channel, +int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config); * devaddr: physical address of the source */ -int s3c2410_dma_devconfig(unsigned int channel, +int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -1173,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig); * returns the current transfer points for the dma source and destination */ -int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) +int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c index cb459dd9545..6143aa14768 100644 --- a/arch/arm/plat-samsung/dma.c +++ b/arch/arm/plat-samsung/dma.c @@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel) * irq? */ -int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) +int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_opfn); -int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) +int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); -int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) +int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h index 2e8f8c6560d..8c273b7a6f5 100644 --- a/arch/arm/plat-samsung/include/plat/dma.h +++ b/arch/arm/plat-samsung/include/plat/dma.h @@ -42,6 +42,7 @@ struct s3c2410_dma_client { }; struct s3c2410_dma_chan; +enum dma_ch; /* s3c2410_dma_cbfn_t * @@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *, * request a dma channel exclusivley */ -extern int s3c2410_dma_request(unsigned int channel, +extern int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *, void *dev); @@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel, * change the state of the dma channel */ -extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op); +extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op); /* s3c2410_dma_setflags * * set the channel's flags to a given state */ 
-extern int s3c2410_dma_setflags(unsigned int channel, +extern int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags); /* s3c2410_dma_free @@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel, * free the dma channel (will also abort any outstanding operations) */ -extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); +extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *); /* s3c2410_dma_enqueue * @@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); * drained before the buffer is given to the DMA system. */ -extern int s3c2410_dma_enqueue(unsigned int channel, void *id, +extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size); /* s3c2410_dma_config @@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id, * configure the dma channel */ -extern int s3c2410_dma_config(unsigned int channel, int xferunit); +extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); /* s3c2410_dma_devconfig * * configure the device we're talking to */ -extern int s3c2410_dma_devconfig(unsigned int channel, +extern int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr); /* s3c2410_dma_getposition @@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel, * get the position that the dma transfer is currently at */ -extern int s3c2410_dma_getposition(unsigned int channel, +extern int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dest); -extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn); -extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn); +extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); +extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); -- cgit v1.2.3-18-g5258 From 33e1e5e317b0fc78ac6588ec8aa40005bcddd7ce Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Sat, 16 Jul 2011 22:39:35 -0700 Subject: ARM: davinci: Check for NULL return from irq_alloc_generic_chip Avoid NULL dereference of irq_alloc_generic_chip return in low memory conditions. Signed-off-by: Todd Poynor Signed-off-by: Sekhar Nori --- arch/arm/mach-davinci/irq.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c index bfe68ec4e1a..85e77fd9d56 100644 --- a/arch/arm/mach-davinci/irq.c +++ b/arch/arm/mach-davinci/irq.c @@ -52,6 +52,12 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) struct irq_chip_type *ct; gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); + if (!gc) { + pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", + __func__, irq_start); + return; + } + ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack; ct->chip.irq_mask = irq_gc_mask_clr_bit; -- cgit v1.2.3-18-g5258 From 9daedd833a38edd90cf7baa1b1fcf61c3a0721e3 Mon Sep 17 00:00:00 2001 From: Jon Povey Date: Tue, 19 Jul 2011 12:30:11 +0900 Subject: davinci: DM365 EVM: fix video input mux bits Video input mux settings for tvp7002 and imager inputs were swapped. Comment was correct. Tested on EVM with tvp7002 input. 
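As a rough illustration of why swapping those two values matters (the exact register semantics are board-specific; only the selector values from the patch are reused here, the decode table is hypothetical), the low bits of the mux word pick which front end is routed to the capture interface, so handing the imager's selector to the tvp7002 path silently picks the wrong physical input even though everything else is configured correctly.

#include <stdio.h>

#define MUX_SEL_TVP7002 1u      /* component HD front end */
#define MUX_SEL_IMAGER  2u      /* HD imager module */

static const char *decode_input(unsigned int mux)
{
        switch (mux & 3u) {     /* only the low selector bits are modelled here */
        case MUX_SEL_TVP7002:
                return "tvp7002 HD";
        case MUX_SEL_IMAGER:
                return "HD imager";
        default:
                return "composite/other";
        }
}

int main(void)
{
        unsigned int mux = 0;

        mux |= MUX_SEL_IMAGER;  /* pre-patch code used 1 here, i.e. the tvp7002 path */
        printf("selected input: %s\n", decode_input(mux));
        return 0;
}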
Signed-off-by: Jon Povey Acked-by: Manjunath Hadli Cc: stable@kernel.org Signed-off-by: Sekhar Nori --- arch/arm/mach-davinci/board-dm365-evm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index c67f684ee3e..09a87e61ffc 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -520,7 +520,7 @@ fail: */ if (have_imager()) { label = "HD imager"; - mux |= 1; + mux |= 2; /* externally mux MMC1/ENET/AIC33 to imager */ mux |= BIT(6) | BIT(5) | BIT(3); @@ -540,7 +540,7 @@ fail: resets &= ~BIT(1); if (have_tvp7002()) { - mux |= 2; + mux |= 1; resets &= ~BIT(2); label = "tvp7002 HD"; } else { -- cgit v1.2.3-18-g5258 From 540b573875bd26dfe39aa18d22dc195f275fc0df Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 13 Jul 2011 15:53:30 +0100 Subject: ARM: 6999/1: head, zImage: Always Enter the kernel in ARM state Currently, the documented kernel entry requirements are not explicit about whether the kernel should be entered in ARM or Thumb, leading to an ambiguitity about how to enter Thumb-2 kernels. As a result, the kernel is reliant on the zImage decompressor to enter the kernel proper in the correct instruction set state. This patch changes the boot entry protocol for head.S and Image to be the same as for zImage: in all cases, the kernel is now entered in ARM. Documentation/arm/Booting is updated to reflect this new policy. A different rule will be needed for Cortex-M class CPUs as and when support for those lands in mainline, since these CPUs don't support the ARM instruction set at all: a note is added to the effect that the kernel must be entered in Thumb on such systems. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/boot/compressed/head.S | 3 ++- arch/arm/kernel/head-nommu.S | 8 ++++++++ arch/arm/kernel/head.S | 8 ++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index f9da41921c5..51af7091d84 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -353,7 +353,8 @@ not_relocated: mov r0, #0 mov r0, #0 @ must be zero mov r1, r7 @ restore architecture number mov r2, r8 @ restore atags pointer - mov pc, r4 @ call kernel + ARM( mov pc, r4 ) @ call kernel + THUMB( bx r4 ) @ entry point is always ARM .align 2 .type LC0, #object diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 6b1e0ad9ec3..d46f25968be 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -32,8 +32,16 @@ * numbers for r1. * */ + .arm + __HEAD ENTRY(stext) + + THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, + THUMB( .thumb ) @ switch to Thumb now. + THUMB(1: ) + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled #ifndef CONFIG_CPU_CP15 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 278c1b0ebb2..742b6108a00 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -71,8 +71,16 @@ * crap here - that's what the boot loader (or in extreme, well justified * circumstances, zImage) is for. */ + .arm + __HEAD ENTRY(stext) + + THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, + THUMB( .thumb ) @ switch to Thumb now. 
+ THUMB(1: ) + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled mrc p15, 0, r9, c0, c0 @ get processor id -- cgit v1.2.3-18-g5258 From 140d5dc1d7d86d766951b7a9aef5b66f1dfdfae2 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 13 Jul 2011 16:32:58 +0100 Subject: ARM: 7000/1: LPAE: Use long long printk format for displaying the pud Currently using just long but this is not enough for the LPAE format (64-bit entries). Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3..ee769234157 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pud = pud_offset(pgd, addr); if (PTRS_PER_PUD != 1) - printk(", *pud=%08lx", pud_val(*pud)); + printk(", *pud=%08llx", (long long)pud_val(*pud)); if (pud_none(*pud)) break; -- cgit v1.2.3-18-g5258 From 2ef75701d1711a1feee2a82b42a2597ddc05f88b Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 21 Jul 2011 14:51:13 +0100 Subject: ARM: CPU hotplug: fix abuse of irqdesc->node irqdesc's node member is supposed to mark the numa node number for the interrupt. Our use of it is non-standard. Remove this, replacing the functionality with a test of the affinity mask. Signed-off-by: Russell King --- arch/arm/common/gic.c | 1 - arch/arm/kernel/irq.c | 10 ++-------- 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 4ddd0a6ac7f..635d9857b07 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -189,7 +189,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bit = 1 << (cpu + shift); spin_lock(&irq_controller_lock); - d->node = cpu; val = readl_relaxed(reg) & ~mask; writel_relaxed(val | bit, reg); spin_unlock(&irq_controller_lock); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 83bbad03fcc..d7aa5c97877 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -166,15 +166,9 @@ void migrate_irqs(void) bool affinity_broken = false; raw_spin_lock(&desc->lock); - do { - if (desc->action == NULL) - break; - - if (d->node != cpu) - break; - + if (desc->action != NULL && + cpumask_test_cpu(smp_processor_id(), d->affinity)) affinity_broken = migrate_one_irq(d); - } while (0); raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) -- cgit v1.2.3-18-g5258 From 5dfc54e087c15f823ee9b6541d2f0f314e69cbed Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 21 Jul 2011 15:00:57 +0100 Subject: ARM: GIC: avoid routing interrupts to offline CPUs The irq_set_affinity() method can be called with masks which include offline CPUs. This allows offline CPUs to have interrupts routed to them by writing to /proc/irq/*/smp_affinity after hotplug has taken a CPU offline. Fix this by ensuring that we select a target CPU present in both the required affinity and the online CPU mask. Ensure that we return IRQ_SET_MASK_OK (which happens to be 0) on success to ensure generic code copies the new mask into the irq_data structure. 
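The selection the patch relies on is easy to model in a few lines of ordinary C; cpumask_any_and() boils down to "first bit set in the AND of the two masks". Masks are plain 32-bit words in this sketch, and the limit of 8 mirrors the per-register CPU field of the GIC distributor touched by the patch.

#include <stdint.h>
#include <stdio.h>

/* First CPU present in both masks, or -1 if the intersection is empty;
 * a stand-in for cpumask_any_and(mask_val, cpu_online_mask). */
static int first_cpu_in_both(uint32_t requested, uint32_t online)
{
        uint32_t both = requested & online;

        return both ? __builtin_ctz(both) : -1;
}

int main(void)
{
        uint32_t online = 0x3;          /* CPUs 0 and 1 online */
        uint32_t requested = 0xc;       /* affinity written for CPUs 2 and 3, both offline */
        int cpu = first_cpu_in_both(requested, online);

        if (cpu < 0 || cpu >= 8)        /* mirrors the 'cpu >= 8 || cpu >= nr_cpu_ids' test */
                printf("no usable target, reject with -EINVAL\n");
        else
                printf("route the interrupt to CPU%d\n", cpu);
        return 0;
}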
Signed-off-by: Russell King --- arch/arm/common/gic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 635d9857b07..7bdd91766d6 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -179,10 +179,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int shift = (d->irq % 4) * 8; - unsigned int cpu = cpumask_first(mask_val); + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); u32 val, mask, bit; - if (cpu >= 8) + if (cpu >= 8 || cpu >= nr_cpu_ids) return -EINVAL; mask = 0xff << shift; @@ -193,7 +193,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, writel_relaxed(val | bit, reg); spin_unlock(&irq_controller_lock); - return 0; + return IRQ_SET_MASK_OK; } #endif -- cgit v1.2.3-18-g5258 From ca15af19ac07908c8ca386f6d944a18aa343b868 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 21 Jul 2011 15:07:56 +0100 Subject: ARM: CPU hotplug: pass in proper affinity mask on IRQ migration Now that the GIC takes care of selecting a target interrupt from the affinity mask, we don't need all this complexity in the core code anymore. Just detect when we need to break affinity. Signed-off-by: Russell King --- arch/arm/kernel/irq.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index d7aa5c97877..ab63c05290e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -133,17 +133,15 @@ int __init arch_probe_nr_irqs(void) static bool migrate_one_irq(struct irq_data *d) { - unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); + const struct cpumask *affinity = d->affinity; bool ret = false; - if (cpu >= nr_cpu_ids) { - cpu = cpumask_any(cpu_online_mask); + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { + affinity = cpu_online_mask; ret = true; } - pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); - - d->chip->irq_set_affinity(d, cpumask_of(cpu), true); + d->chip->irq_set_affinity(d, affinity, true); return ret; } -- cgit v1.2.3-18-g5258 From 78359cb86b8c4c8946f6732eac2757fa5e1d4de4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 21 Jul 2011 15:14:21 +0100 Subject: ARM: CPU hotplug: ensure we migrate all IRQs off a downed CPU Our selection of interrupts to consider for IRQ migration is sub-standard. We were potentially including per-CPU interrupts in our migration strategy, but omitting chained interrupts. This caused some interrupts to remain on a downed CPU. We were also trying to migrate interrupts which were not migratable, resulting in an OOPS. Instead, iterate over all interrupts, skipping per-CPU interrupts or interrupts whose affinity does not include the downed CPU, and attempt to set the affinity for everyone else if their chip implements irq_set_affinity().
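Condensing that policy into a small, self-contained model (the types below are stand-ins, not the kernel's irq_desc/irq_data, and the online-mask handling is simplified):

#include <stdbool.h>
#include <stdio.h>

struct fake_irq {
        bool per_cpu;                   /* per-CPU interrupts are never migrated */
        unsigned int affinity;          /* bitmask of CPUs this IRQ may run on */
        bool chip_can_set_affinity;     /* does the chip implement irq_set_affinity? */
};

/* What migrate_one_irq() decides for one interrupt when 'cpu' goes down;
 * 'remaining_online' is the mask of CPUs that stay up. */
static void migrate_decision(const struct fake_irq *irq, unsigned int cpu,
                             unsigned int remaining_online)
{
        if (irq->per_cpu || !(irq->affinity & (1u << cpu))) {
                printf("skip: per-CPU or not affine to CPU%u\n", cpu);
                return;
        }
        if (!(irq->affinity & remaining_online))
                printf("affinity broken: fall back to any online CPU\n");
        if (irq->chip_can_set_affinity)
                printf("re-target via the chip's irq_set_affinity()\n");
        else
                printf("chip cannot re-target this interrupt\n");
}

int main(void)
{
        /* An IRQ bound only to CPU1 while CPU1 is being taken down. */
        struct fake_irq irq = { .per_cpu = false, .affinity = 0x2,
                                .chip_can_set_affinity = true };

        migrate_decision(&irq, 1, 0x1);
        return 0;
}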
Signed-off-by: Russell King --- arch/arm/kernel/irq.c | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index ab63c05290e..0f928a131af 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -131,46 +131,63 @@ int __init arch_probe_nr_irqs(void) #ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_data *d) +static bool migrate_one_irq(struct irq_desc *desc) { + struct irq_data *d = irq_desc_get_irq_data(desc); const struct cpumask *affinity = d->affinity; + struct irq_chip *c; bool ret = false; + /* + * If this is a per-CPU interrupt, or the affinity does not + * include this CPU, then we have nothing to do. + */ + if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) + return false; + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { affinity = cpu_online_mask; ret = true; } - d->chip->irq_set_affinity(d, affinity, true); + c = irq_data_get_irq_chip(d); + if (c->irq_set_affinity) + c->irq_set_affinity(d, affinity, true); + else + pr_debug("IRQ%u: unable to set affinity\n", d->irq); return ret; } /* - * The CPU has been marked offline. Migrate IRQs off this CPU. If - * the affinity settings do not allow other CPUs, force them onto any + * The current CPU has been marked offline. Migrate IRQs off this CPU. + * If the affinity settings do not allow other CPUs, force them onto any * available CPU. + * + * Note: we must iterate over all IRQs, whether they have an attached + * action structure or not, as we need to get chained interrupts too. */ void migrate_irqs(void) { - unsigned int i, cpu = smp_processor_id(); + unsigned int i; struct irq_desc *desc; unsigned long flags; local_irq_save(flags); for_each_irq_desc(i, desc) { - struct irq_data *d = &desc->irq_data; bool affinity_broken = false; + if (!desc) + continue; raw_spin_lock(&desc->lock); - if (desc->action != NULL && - cpumask_test_cpu(smp_processor_id(), d->affinity)) - affinity_broken = migrate_one_irq(d); + affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) - pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); + pr_warning("IRQ%u no longer affine to CPU%u\n", i, + smp_processor_id()); } local_irq_restore(flags); -- cgit v1.2.3-18-g5258 From 24daa15aabaf9ef655f7ca868366c8a4c87397f3 Mon Sep 17 00:00:00 2001 From: Petr Štetiar Date: Sat, 9 Jul 2011 14:18:36 +0100 Subject: ARM: 6997/1: ep93xx: increase NR_BANKS to 16 for support of 128MB RAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I've got hands on one ts-7300 board, which is equipped with 128MB RAM in two 64MB memory chips, so it's 16 banks/8MB each. Without this patch, the bootmem init code complains about small NR_BANKS number and only lower 64MB is accessible.
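The arithmetic behind the bump, spelled out as a throwaway check (sizes from the commit message, and 8 is the pre-patch NR_BANKS default):

#include <stdio.h>

int main(void)
{
        const unsigned int total_mb = 128;      /* two 64MB chips on the ts-7300 */
        const unsigned int bank_mb = 8;         /* ep93xx RAM shows up as 8MB banks */
        const unsigned int old_nr_banks = 8;    /* default before the patch */
        unsigned int needed = total_mb / bank_mb;

        printf("banks needed: %u; with NR_BANKS=%u only %u MB was usable\n",
               needed, old_nr_banks, old_nr_banks * bank_mb);
        return 0;
}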
Cc: Ryan Mallon Acked-by: H Hartley Sweeten Signed-off-by: Petr Štetiar Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07a..b13720d2c98 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -192,7 +192,11 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn } /* * Memory map description */ -#define NR_BANKS 8 +#ifdef CONFIG_ARCH_EP93XX +# define NR_BANKS 16 +#else +# define NR_BANKS 8 +#endif struct membank { phys_addr_t start; -- cgit v1.2.3-18-g5258 From 4cde7e0dca98e0e93dceb174d83f766d0d637c23 Mon Sep 17 00:00:00 2001 From: Heechul Yun Date: Tue, 19 Jul 2011 13:26:54 +0100 Subject: ARM: 6998/2: kernel: use proper memory barriers for bitops Improve scalability by avoiding costly and unnecessary L2 cache sync in handling bitops. Signed-off-by: Heechul Yun Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/bitops.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b4892a06442..f4280593dfa 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -26,8 +26,8 @@ #include #include -#define smp_mb__before_clear_bit() mb() -#define smp_mb__after_clear_bit() mb() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() /* * These functions are the basis of our bit ops. -- cgit v1.2.3-18-g5258 From e7d59db91a346f5069692e3b1f4e0afd100096dc Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Fri, 22 Jul 2011 16:47:26 +0100 Subject: ARM: 7004/1: fix traps.h compile warnings Building kernel 3.0 for an n2100 (plat-iop) results in: In file included from arch/arm/plat-iop/cp6.c:20: /tmp/linux-3.0/arch/arm/include/asm/traps.h:12: warning: 'struct pt_regs' declared inside parameter list /tmp/linux-3.0/arch/arm/include/asm/traps.h:12: warning: its scope is only this definition or declaration, which is probably not what you want /tmp/linux-3.0/arch/arm/include/asm/traps.h:48: warning: 'struct pt_regs' declared inside parameter list /tmp/linux-3.0/arch/arm/include/asm/traps.h:48: warning: 'struct task_struct' declared inside parameter list arch/arm/plat-iop/cp6.c:45: warning: initialization from incompatible pointer type Nothing here depends on the layout of pt_regs or task_struct, so this can be fixed by adding forward struct declarations to asm/traps.h. Signed-off-by: Mikael Pettersson Signed-off-by: Russell King --- arch/arm/include/asm/traps.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index f90756dc16d..5b29a667362 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -3,6 +3,9 @@ #include +struct pt_regs; +struct task_struct; + struct undef_hook { struct list_head node; u32 instr_mask; -- cgit v1.2.3-18-g5258
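The traps.h fix above is an instance of a general C rule worth spelling out: a struct tag first mentioned inside a prototype's parameter list has a scope limited to that prototype, so later definitions and callers see a different, incompatible type, which is what the two warnings complain about. A forward declaration at file scope fixes it, and incomplete types remain perfectly usable through pointers. A standalone sketch with throwaway names:

/* Forward declarations at file scope, as the patch adds to asm/traps.h.
 * The full struct definitions can live in another header entirely. */
struct pt_regs;
struct task_struct;

int example_undef_hook(struct pt_regs *regs, unsigned int instr);

/* A translation unit that never sees the full definitions can still store
 * and pass around the pointers. */
static struct pt_regs *last_regs;

int example_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        last_regs = regs;       /* keep the pointer; no field access needed */
        return instr ? 0 : -1;
}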