Diffstat (limited to 'arch')
255 files changed, 3718 insertions, 10674 deletions
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c45e7d..8e47709160f 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	data.period = event->hw.last_period;
 
 	if (alpha_perf_event_set_period(event, hwc, idx)) {
-		if (perf_event_overflow(event, 1, &data, regs)) {
+		if (perf_event_overflow(event, &data, regs)) {
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 818e74ed45d..f20d1b5396b 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -91,7 +91,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
 #define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_irq_work_pending_flag();
 }
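The two alpha hunks above track core API changes made elsewhere in this series: perf_event_overflow() loses its nmi argument, and the irq_work hook is now the generically named arch_irq_work_raise() instead of a locally named helper. A minimal sketch of the new overflow-handling shape (hypothetical handler; only perf_event_overflow()'s signature comes from the diff):

/*
 * Sketch only: callers now pass event, sample data and registers; the
 * old "is this NMI context" flag is gone. A nonzero return still means
 * the event fired too often and the caller should throttle it.
 */
static void example_overflow_check(struct perf_event *event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	if (perf_event_overflow(event, data, regs)) {
		/* arch code disables ("throttles") the counter here */
	}
}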
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22a..e04fa9d7637 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -642,6 +642,7 @@ config ARCH_SHMOBILE
 	select NO_IOPORT
 	select SPARSE_IRQ
 	select MULTI_IRQ_HANDLER
+	select PM_GENERIC_DOMAINS if PM
 	help
 	  Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 47ad3b1a4fe..5a584520db2 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_MMP=y
+CONFIG_MACH_BROWNSTONE=y
 CONFIG_MACH_FLINT=y
 CONFIG_MACH_MARVELL_JASPER=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -63,10 +64,16 @@ CONFIG_BACKLIGHT_MAX8925=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX8925=y
+CONFIG_MMC=y
 # CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
@@ -81,7 +88,7 @@ CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DYNAMIC_DEBUG is not set
CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_ERRORS=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe..dd7f3b9f4cb 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 enum armv6mpcore_perf_types {
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb5..e20ca9cafef 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -255,6 +255,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -371,6 +385,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -787,7 +815,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb..3c4397491d0 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 #define XSCALE_PMU_ENABLE	0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
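All of the [C(NODE)] blocks added to the ARM and XScale cache maps are the same boilerplate: the perf core grew a NODE cache level, so every map sized by PERF_COUNT_HW_CACHE_MAX must now cover it, here with every op/result combination marked unsupported. The shape of these tables, assuming the usual C(_x) shorthand these files define as PERF_COUNT_HW_CACHE_##_x:

/* Condensed sketch of the 3-D cache map being extended:
 * cache level x operation x result -> event code or "unsupported". */
static const unsigned example_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					    [PERF_COUNT_HW_CACHE_OP_MAX]
					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		/* OP_WRITE and OP_PREFETCH are filled the same way */
	},
};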
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf2..5c199610719 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
@@ -479,7 +479,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
 	attr.bp_type	= type;
 	attr.disabled	= 1;
 
-	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+					   tsk);
 }
 
 static int ptrace_gethbpregs(struct task_struct *tsk, long num,
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e..5f452f8fde0 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
 	unsigned int address, destreg, data, type;
 	unsigned int res = 0;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
 	if (current->pid != previous_pid) {
 		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
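register_user_hw_breakpoint() gained a context argument in the same API rework; ARM ptrace passes NULL because ptrace_hbptriggered() needs no extra state. A sketch of the registration pattern after the change (ptrace_breakpoint_init() is the generic attr initializer from linux/hw_breakpoint.h; error handling elided):

static struct perf_event *example_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);
	attr.bp_type  = type;
	attr.disabled = 1;

	/* New third argument: opaque context handed back to the callback. */
	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered,
					   NULL, tsk);
}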
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
deleted file mode 100644
index 02182c16a02..00000000000
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_mci.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * MultiMedia Card Interface (MCI) registers.
- * Based on AT91RM9200 datasheet revision F.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_MCI_H
-#define AT91_MCI_H
-
-#define AT91_MCI_CR		0x00		/* Control Register */
-#define AT91_MCI_MCIEN		(1 << 0)	/* Multi-Media Interface Enable */
-#define AT91_MCI_MCIDIS		(1 << 1)	/* Multi-Media Interface Disable */
-#define AT91_MCI_PWSEN		(1 << 2)	/* Power Save Mode Enable */
-#define AT91_MCI_PWSDIS		(1 << 3)	/* Power Save Mode Disable */
-#define AT91_MCI_SWRST		(1 << 7)	/* Software Reset */
-
-#define AT91_MCI_MR		0x04		/* Mode Register */
-#define AT91_MCI_CLKDIV		(0xff << 0)	/* Clock Divider */
-#define AT91_MCI_PWSDIV		(7 << 8)	/* Power Saving Divider */
-#define AT91_MCI_RDPROOF	(1 << 11)	/* Read Proof Enable [SAM926[03] only] */
-#define AT91_MCI_WRPROOF	(1 << 12)	/* Write Proof Enable [SAM926[03] only] */
-#define AT91_MCI_PDCFBYTE	(1 << 13)	/* PDC Force Byte Transfer [SAM926[03] only] */
-#define AT91_MCI_PDCPADV	(1 << 14)	/* PDC Padding Value */
-#define AT91_MCI_PDCMODE	(1 << 15)	/* PDC-orientated Mode */
-#define AT91_MCI_BLKLEN		(0xfff << 18)	/* Data Block Length */
-
-#define AT91_MCI_DTOR		0x08		/* Data Timeout Register */
-#define AT91_MCI_DTOCYC		(0xf << 0)	/* Data Timeout Cycle Number */
-#define AT91_MCI_DTOMUL		(7 << 4)	/* Data Timeout Multiplier */
-#define AT91_MCI_DTOMUL_1	(0 << 4)
-#define AT91_MCI_DTOMUL_16	(1 << 4)
-#define AT91_MCI_DTOMUL_128	(2 << 4)
-#define AT91_MCI_DTOMUL_256	(3 << 4)
-#define AT91_MCI_DTOMUL_1K	(4 << 4)
-#define AT91_MCI_DTOMUL_4K	(5 << 4)
-#define AT91_MCI_DTOMUL_64K	(6 << 4)
-#define AT91_MCI_DTOMUL_1M	(7 << 4)
-
-#define AT91_MCI_SDCR		0x0c		/* SD Card Register */
-#define AT91_MCI_SDCSEL		(3 << 0)	/* SD Card Selector */
-#define AT91_MCI_SDCBUS		(1 << 7)	/* 1-bit or 4-bit bus */
-
-#define AT91_MCI_ARGR		0x10		/* Argument Register */
-
-#define AT91_MCI_CMDR		0x14		/* Command Register */
-#define AT91_MCI_CMDNB		(0x3f << 0)	/* Command Number */
-#define AT91_MCI_RSPTYP		(3 << 6)	/* Response Type */
-#define AT91_MCI_RSPTYP_NONE	(0 << 6)
-#define AT91_MCI_RSPTYP_48	(1 << 6)
-#define AT91_MCI_RSPTYP_136	(2 << 6)
-#define AT91_MCI_SPCMD		(7 << 8)	/* Special Command */
-#define AT91_MCI_SPCMD_NONE	(0 << 8)
-#define AT91_MCI_SPCMD_INIT	(1 << 8)
-#define AT91_MCI_SPCMD_SYNC	(2 << 8)
-#define AT91_MCI_SPCMD_ICMD	(4 << 8)
-#define AT91_MCI_SPCMD_IRESP	(5 << 8)
-#define AT91_MCI_OPDCMD		(1 << 11)	/* Open Drain Command */
-#define AT91_MCI_MAXLAT		(1 << 12)	/* Max Latency for Command to Response */
-#define AT91_MCI_TRCMD		(3 << 16)	/* Transfer Command */
-#define AT91_MCI_TRCMD_NONE	(0 << 16)
-#define AT91_MCI_TRCMD_START	(1 << 16)
-#define AT91_MCI_TRCMD_STOP	(2 << 16)
-#define AT91_MCI_TRDIR		(1 << 18)	/* Transfer Direction */
-#define AT91_MCI_TRTYP		(3 << 19)	/* Transfer Type */
-#define AT91_MCI_TRTYP_BLOCK	(0 << 19)
-#define AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
-#define AT91_MCI_TRTYP_STREAM	(2 << 19)
-#define AT91_MCI_TRTYP_SDIO_BYTE	(4 << 19)
-#define AT91_MCI_TRTYP_SDIO_BLOCK	(5 << 19)
-
-#define AT91_MCI_BLKR		0x18		/* Block Register */
-#define AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */
-#define AT91_MCI_BLKR_BLKLEN(n)	((0xffff & (n)) << 16)	/* Block length */
-
-#define AT91_MCI_RSPR(n)	(0x20 + ((n) * 4))	/* Response Registers 0-3 */
-#define AT91_MCR_RDR		0x30		/* Receive Data Register */
-#define AT91_MCR_TDR		0x34		/* Transmit Data Register */
-
-#define AT91_MCI_SR		0x40		/* Status Register */
-#define AT91_MCI_CMDRDY		(1 << 0)	/* Command Ready */
-#define AT91_MCI_RXRDY		(1 << 1)	/* Receiver Ready */
-#define AT91_MCI_TXRDY		(1 << 2)	/* Transmit Ready */
-#define AT91_MCI_BLKE		(1 << 3)	/* Data Block Ended */
-#define AT91_MCI_DTIP		(1 << 4)	/* Data Transfer in Progress */
-#define AT91_MCI_NOTBUSY	(1 << 5)	/* Data Not Busy */
-#define AT91_MCI_ENDRX		(1 << 6)	/* End of RX Buffer */
-#define AT91_MCI_ENDTX		(1 << 7)	/* End fo TX Buffer */
-#define AT91_MCI_SDIOIRQA	(1 << 8)	/* SDIO Interrupt for Slot A */
-#define AT91_MCI_SDIOIRQB	(1 << 9)	/* SDIO Interrupt for Slot B */
-#define AT91_MCI_RXBUFF		(1 << 14)	/* RX Buffer Full */
-#define AT91_MCI_TXBUFE		(1 << 15)	/* TX Buffer Empty */
-#define AT91_MCI_RINDE		(1 << 16)	/* Response Index Error */
-#define AT91_MCI_RDIRE		(1 << 17)	/* Response Direction Error */
-#define AT91_MCI_RCRCE		(1 << 18)	/* Response CRC Error */
-#define AT91_MCI_RENDE		(1 << 19)	/* Response End Bit Error */
-#define AT91_MCI_RTOE		(1 << 20)	/* Response Time-out Error */
-#define AT91_MCI_DCRCE		(1 << 21)	/* Data CRC Error */
-#define AT91_MCI_DTOE		(1 << 22)	/* Data Time-out Error */
-#define AT91_MCI_OVRE		(1 << 30)	/* Overrun */
-#define AT91_MCI_UNRE		(1 << 31)	/* Underrun */
-
-#define AT91_MCI_IER		0x44		/* Interrupt Enable Register */
-#define AT91_MCI_IDR		0x48		/* Interrupt Disable Register */
-#define AT91_MCI_IMR		0x4c		/* Interrupt Mask Register */
-
-#endif
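The header removed above is a pure register description; its fields compose register words by OR-ing. A purely illustrative example of the layout (the command value is hypothetical, not taken from this diff):

/* Hypothetical example: SD CMD17 (READ_SINGLE_BLOCK) as an AT91 MCI
 * command word - one block, card-to-host, 48-bit response. */
u32 cmd = (17 & AT91_MCI_CMDNB)
	| AT91_MCI_RSPTYP_48
	| AT91_MCI_TRCMD_START
	| AT91_MCI_TRDIR
	| AT91_MCI_TRTYP_BLOCK
	| AT91_MCI_MAXLAT;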
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 606a6f27ed6..5f5d7830887 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 
+#include <asm/io.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/common.h>
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 1e0f809644b..e00d61e2efb 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -8,6 +8,7 @@
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/serial_8250.h>
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 1bd73a04be2..04c49f7543e 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -17,6 +17,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/delay.h>
+#include <asm/io.h>
 
 #include <mach/da8xx.h>
 #include <mach/sram.h>
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 33ee2c863d1..3cedcf2d39e 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,11 +1,13 @@
 #
 # Makefile for the linux kernel.
 #
-obj-y			:= core.o clock.o dma-m2p.o gpio.o
+obj-y			:= core.o clock.o
 obj-m			:=
 obj-n			:=
 obj-			:=
 
+obj-$(CONFIG_EP93XX_DMA)	+= dma.o
+
 obj-$(CONFIG_MACH_ADSSPHERE)	+= adssphere.o
 obj-$(CONFIG_MACH_EDB93XX)	+= edb93xx.o
 obj-$(CONFIG_MACH_GESBC9312)	+= gesbc9312.o
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 6659a0d137a..c60f081e930 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -174,14 +174,10 @@ struct sys_timer ep93xx_timer = {
 /*************************************************************************
 * EP93xx IRQ handling
 *************************************************************************/
-extern void ep93xx_gpio_init_irq(void);
-
 void __init ep93xx_init_irq(void)
 {
 	vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
 	vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0);
-
-	ep93xx_gpio_init_irq();
 }
 
 
@@ -241,6 +237,24 @@ unsigned int ep93xx_chip_revision(void)
 }
 
 /*************************************************************************
+ * EP93xx GPIO
+ *************************************************************************/
+static struct resource ep93xx_gpio_resource[] = {
+	{
+		.start	= EP93XX_GPIO_PHYS_BASE,
+		.end	= EP93XX_GPIO_PHYS_BASE + 0xcc - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device ep93xx_gpio_device = {
+	.name		= "gpio-ep93xx",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(ep93xx_gpio_resource),
+	.resource	= ep93xx_gpio_resource,
+};
+
+/*************************************************************************
 * EP93xx peripheral handling
 *************************************************************************/
 #define EP93XX_UART_MCR_OFFSET		(0x0100)
@@ -492,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = {
 	},
 };
 
+static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device ep93xx_spi_device = {
 	.name		= "ep93xx-spi",
 	.id		= 0,
 	.dev		= {
-		.platform_data = &ep93xx_spi_master_data,
+		.platform_data		= &ep93xx_spi_master_data,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.dma_mask		= &ep93xx_spi_dma_mask,
 	},
 	.num_resources	= ARRAY_SIZE(ep93xx_spi_resources),
 	.resource	= ep93xx_spi_resources,
@@ -870,14 +888,13 @@ void __init ep93xx_register_ac97(void)
 	platform_device_register(&ep93xx_pcm_device);
 }
 
-extern void ep93xx_gpio_init(void);
-
 void __init ep93xx_init_devices(void)
 {
 	/* Disallow access to MaverickCrunch initially */
 	ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
 
-	ep93xx_gpio_init();
+	/* Get the GPIO working early, other devices need it */
+	platform_device_register(&ep93xx_gpio_device);
 
 	amba_device_register(&uart1_device, &iomem_resource);
 	amba_device_register(&uart2_device, &iomem_resource);
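One detail worth noting in the core.c hunk: statically declared platform devices get no DMA masks from the bus code, so a device whose driver will use the DMA mapping API (the SPI controller here) must carry them explicitly - a NULL dev->dma_mask silently disables DMA for the device. The idiom, reduced to its essentials (names illustrative):

static u64 example_dma_mask = DMA_BIT_MASK(32);

static struct platform_device example_device = {
	.name	= "example",
	.id	= -1,
	.dev	= {
		/* Both masks must be set: dma_mask is a pointer for
		 * historical reasons, so it needs backing storage. */
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.dma_mask		= &example_dma_mask,
	},
};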
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f..00000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/dma-m2p.c
- * M2P DMA handling for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- *
- * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * On the EP93xx chip the following peripherals my be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- *	I2S	contains 3 Tx and 3 Rx DMA Channels
- *	AAC	contains 3 Tx and 3 Rx DMA Channels
- *	UART1	contains 1 Tx and 1 Rx DMA Channels
- *	UART2	contains 1 Tx and 1 Rx DMA Channels
- *	UART3	contains 1 Tx and 1 Rx DMA Channels
- *	IrDA	contains 1 Tx and 1 Rx DMA Channels
- *
- * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
- * with this implementation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
-
-#define M2P_CONTROL			0x00
-#define  M2P_CONTROL_STALL_IRQ_EN	(1 << 0)
-#define  M2P_CONTROL_NFB_IRQ_EN		(1 << 1)
-#define  M2P_CONTROL_ERROR_IRQ_EN	(1 << 3)
-#define  M2P_CONTROL_ENABLE		(1 << 4)
-#define M2P_INTERRUPT			0x04
-#define  M2P_INTERRUPT_STALL		(1 << 0)
-#define  M2P_INTERRUPT_NFB		(1 << 1)
-#define  M2P_INTERRUPT_ERROR		(1 << 3)
-#define M2P_PPALLOC			0x08
-#define M2P_STATUS			0x0c
-#define M2P_REMAIN			0x14
-#define M2P_MAXCNT0			0x20
-#define M2P_BASE0			0x24
-#define M2P_MAXCNT1			0x30
-#define M2P_BASE1			0x34
-
-#define STATE_IDLE	0	/* Channel is inactive. */
-#define STATE_STALL	1	/* Channel is active, no buffers pending. */
-#define STATE_ON	2	/* Channel is active, one buffer pending. */
-#define STATE_NEXT	3	/* Channel is active, two buffers pending. */
-
-struct m2p_channel {
-	char				*name;
-	void __iomem			*base;
-	int				irq;
-
-	struct clk			*clk;
-	spinlock_t			lock;
-
-	void				*client;
-	unsigned			next_slot:1;
-	struct ep93xx_dma_buffer	*buffer_xfer;
-	struct ep93xx_dma_buffer	*buffer_next;
-	struct list_head		buffers_pending;
-};
-
-static struct m2p_channel m2p_rx[] = {
-	{"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
-	{"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
-	{"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
-	{"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
-	{"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
-	{NULL},
-};
-
-static struct m2p_channel m2p_tx[] = {
-	{"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
-	{"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
-	{"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
-	{"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
-	{"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
-	{NULL},
-};
-
-static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
-{
-	if (ch->next_slot == 0) {
-		writel(buf->size, ch->base + M2P_MAXCNT0);
-		writel(buf->bus_addr, ch->base + M2P_BASE0);
-	} else {
-		writel(buf->size, ch->base + M2P_MAXCNT1);
-		writel(buf->bus_addr, ch->base + M2P_BASE1);
-	}
-	ch->next_slot ^= 1;
-}
-
-static void choose_buffer_xfer(struct m2p_channel *ch)
-{
-	struct ep93xx_dma_buffer *buf;
-
-	ch->buffer_xfer = NULL;
-	if (!list_empty(&ch->buffers_pending)) {
-		buf = list_entry(ch->buffers_pending.next,
-				 struct ep93xx_dma_buffer, list);
-		list_del(&buf->list);
-		feed_buf(ch, buf);
-		ch->buffer_xfer = buf;
-	}
-}
-
-static void choose_buffer_next(struct m2p_channel *ch)
-{
-	struct ep93xx_dma_buffer *buf;
-
-	ch->buffer_next = NULL;
-	if (!list_empty(&ch->buffers_pending)) {
-		buf = list_entry(ch->buffers_pending.next,
-				 struct ep93xx_dma_buffer, list);
-		list_del(&buf->list);
-		feed_buf(ch, buf);
-		ch->buffer_next = buf;
-	}
-}
-static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
-{
-	/*
-	 * The control register must be read immediately after being written so
-	 * that the internal state machine is correctly updated. See the ep93xx
-	 * users' guide for details.
-	 */
-	writel(v, ch->base + M2P_CONTROL);
-	readl(ch->base + M2P_CONTROL);
-}
-
-static inline int m2p_channel_state(struct m2p_channel *ch)
-{
-	return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
-}
-
-static irqreturn_t m2p_irq(int irq, void *dev_id)
-{
-	struct m2p_channel *ch = dev_id;
-	struct ep93xx_dma_m2p_client *cl;
-	u32 irq_status, v;
-	int error = 0;
-
-	cl = ch->client;
-
-	spin_lock(&ch->lock);
-	irq_status = readl(ch->base + M2P_INTERRUPT);
-
-	if (irq_status & M2P_INTERRUPT_ERROR) {
-		writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
-		error = 1;
-	}
-
-	if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
-		spin_unlock(&ch->lock);
-		return IRQ_NONE;
-	}
-
-	switch (m2p_channel_state(ch)) {
-	case STATE_IDLE:
-		pr_crit("dma interrupt without a dma buffer\n");
-		BUG();
-		break;
-
-	case STATE_STALL:
-		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-		if (ch->buffer_next != NULL) {
-			cl->buffer_finished(cl->cookie, ch->buffer_next,
-					    0, error);
-		}
-		choose_buffer_xfer(ch);
-		choose_buffer_next(ch);
-		if (ch->buffer_xfer != NULL)
-			cl->buffer_started(cl->cookie, ch->buffer_xfer);
-		break;
-
-	case STATE_ON:
-		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-		ch->buffer_xfer = ch->buffer_next;
-		choose_buffer_next(ch);
-		cl->buffer_started(cl->cookie, ch->buffer_xfer);
-		break;
-
-	case STATE_NEXT:
-		pr_crit("dma interrupt while next\n");
-		BUG();
-		break;
-	}
-
-	v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
-					      M2P_CONTROL_NFB_IRQ_EN);
-	if (ch->buffer_xfer != NULL)
-		v |= M2P_CONTROL_STALL_IRQ_EN;
-	if (ch->buffer_next != NULL)
-		v |= M2P_CONTROL_NFB_IRQ_EN;
-	m2p_set_control(ch, v);
-
-	spin_unlock(&ch->lock);
-	return IRQ_HANDLED;
-}
-
-static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
-{
-	struct m2p_channel *ch;
-	int i;
-
-	if (cl->flags & EP93XX_DMA_M2P_RX)
-		ch = m2p_rx;
-	else
-		ch = m2p_tx;
-
-	for (i = 0; ch[i].base; i++) {
-		struct ep93xx_dma_m2p_client *client;
-
-		client = ch[i].client;
-		if (client != NULL) {
-			int port;
-
-			port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
-			if (port == (client->flags &
-				     EP93XX_DMA_M2P_PORT_MASK)) {
-				pr_warning("DMA channel already used by %s\n",
-					   cl->name ? : "unknown client");
-				return ERR_PTR(-EBUSY);
-			}
-		}
-	}
-
-	for (i = 0; ch[i].base; i++) {
-		if (ch[i].client == NULL)
-			return ch + i;
-	}
-
-	pr_warning("No free DMA channel for %s\n",
-		   cl->name ? : "unknown client");
-	return ERR_PTR(-ENODEV);
-}
: "unknown client"); - return ERR_PTR(-ENODEV); -} - -static void channel_enable(struct m2p_channel *ch) -{ - struct ep93xx_dma_m2p_client *cl = ch->client; - u32 v; - - clk_enable(ch->clk); - - v = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - writel(v, ch->base + M2P_PPALLOC); - - v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK; - v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN; - m2p_set_control(ch, v); -} - -static void channel_disable(struct m2p_channel *ch) -{ - u32 v; - - v = readl(ch->base + M2P_CONTROL); - v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); - m2p_set_control(ch, v); - - while (m2p_channel_state(ch) >= STATE_ON) - cpu_relax(); - - m2p_set_control(ch, 0x0); - - while (m2p_channel_state(ch) == STATE_STALL) - cpu_relax(); - - clk_disable(ch->clk); -} - -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int err; - - ch = find_free_channel(cl); - if (IS_ERR(ch)) - return PTR_ERR(ch); - - err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch); - if (err) - return err; - - ch->client = cl; - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - - cl->channel = ch; - - channel_enable(ch); - - return 0; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register); - -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - free_irq(ch->irq, ch); - ch->client = NULL; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister); - -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - unsigned long flags; - u32 v; - - spin_lock_irqsave(&ch->lock, flags); - v = readl(ch->base + M2P_CONTROL); - if (ch->buffer_xfer == NULL) { - ch->buffer_xfer = buf; - feed_buf(ch, buf); - cl->buffer_started(cl->cookie, buf); - - v |= M2P_CONTROL_STALL_IRQ_EN; - m2p_set_control(ch, v); - - } else if (ch->buffer_next == NULL) { - ch->buffer_next = buf; - feed_buf(ch, buf); - - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - } else { - list_add_tail(&buf->list, &ch->buffers_pending); - } - spin_unlock_irqrestore(&ch->lock, flags); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit); - -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - - list_add_tail(&buf->list, &ch->buffers_pending); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive); - -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - channel_enable(ch); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush); - -static int init_channel(struct m2p_channel *ch) -{ - ch->clk = clk_get(NULL, ch->name); - if (IS_ERR(ch->clk)) - return PTR_ERR(ch->clk); - - spin_lock_init(&ch->lock); - ch->client = NULL; - - return 0; -} - -static int __init ep93xx_dma_m2p_init(void) -{ - int i; - int ret; - - for (i = 0; m2p_rx[i].base; i++) { - ret = init_channel(m2p_rx + i); - if (ret) - return ret; - } - - for (i = 0; m2p_tx[i].base; i++) { - ret = init_channel(m2p_tx + i); - if (ret) - return ret; - } - - pr_info("M2P DMA subsystem initialized\n"); - return 0; -} -arch_initcall(ep93xx_dma_m2p_init); diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c new file mode 100644 index 
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 00000000000..5a257088125
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -0,0 +1,108 @@
+/*
+ * arch/arm/mach-ep93xx/dma.c
+ *
+ * Platform support code for the EP93xx dmaengine driver.
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * This work is based on the original dma-m2p implementation with
+ * following copyrights:
+ *
+ *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *   Copyright (C) 2006 Applied Data Systems
+ *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+#define DMA_CHANNEL(_name, _base, _irq) \
+	{ .name = (_name), .base = (_base), .irq = (_irq) }
+
+/*
+ * DMA M2P channels.
+ *
+ * On the EP93xx chip the following peripherals my be allocated to the 10
+ * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
+ *
+ *	I2S	contains 3 Tx and 3 Rx DMA Channels
+ *	AAC	contains 3 Tx and 3 Rx DMA Channels
+ *	UART1	contains 1 Tx and 1 Rx DMA Channels
+ *	UART2	contains 1 Tx and 1 Rx DMA Channels
+ *	UART3	contains 1 Tx and 1 Rx DMA Channels
+ *	IrDA	contains 1 Tx and 1 Rx DMA Channels
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
+	DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
+	DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
+	DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
+	DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
+	DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
+	DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
+	DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
+	DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
+	DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
+	DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
+	.channels	= ep93xx_dma_m2p_channels,
+	.num_channels	= ARRAY_SIZE(ep93xx_dma_m2p_channels),
+};
+
+static struct platform_device ep93xx_dma_m2p_device = {
+	.name		= "ep93xx-dma-m2p",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &ep93xx_dma_m2p_data,
+	},
+};
+
+/*
+ * DMA M2M channels.
+ *
+ * There are 2 M2M channels which support memcpy/memset and in addition
+ * simple hardware requests from/to SSP and IDE. We do not implement an
+ * external hardware requests.
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
+	DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
+	DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
+	.channels	= ep93xx_dma_m2m_channels,
+	.num_channels	= ARRAY_SIZE(ep93xx_dma_m2m_channels),
+};
+
+static struct platform_device ep93xx_dma_m2m_device = {
+	.name		= "ep93xx-dma-m2m",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &ep93xx_dma_m2m_data,
+	},
+};
+
+static int __init ep93xx_dma_init(void)
+{
+	platform_device_register(&ep93xx_dma_m2p_device);
+	platform_device_register(&ep93xx_dma_m2m_device);
+	return 0;
+}
+arch_initcall(ep93xx_dma_init);
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
deleted file mode 100644
index 415dce37b88..00000000000
--- a/arch/arm/mach-ep93xx/gpio.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * linux/arch/arm/mach-ep93xx/gpio.c
- *
- * Generic EP93xx GPIO handling
- *
- * Copyright (c) 2008 Ryan Mallon <ryan@bluewatersys.com>
- *
- * Based on code originally from:
- *  linux/arch/arm/mach-ep93xx/core.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-
-#include <mach/hardware.h>
-
-/*************************************************************************
- * Interrupt handling for EP93xx on-chip GPIOs
- *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3]	= { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3]	= { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3]		= { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3]	= { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3]	= { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(unsigned port)
-{
-	BUG_ON(port > 2);
-
-	__raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
-
-	__raw_writeb(gpio_int_type2[port],
-		EP93XX_GPIO_REG(int_type2_register_offset[port]));
-
-	__raw_writeb(gpio_int_type1[port],
-		EP93XX_GPIO_REG(int_type1_register_offset[port]));
-
-	__raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
-		EP93XX_GPIO_REG(int_en_register_offset[port]));
-}
-
-static inline void ep93xx_gpio_int_mask(unsigned line)
-{
-	gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
-}
-
-static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
-{
-	int line = irq_to_gpio(irq);
-	int port = line >> 3;
-	int port_mask = 1 << (line & 7);
-
-	if (enable)
-		gpio_int_debounce[port] |= port_mask;
-	else
-		gpio_int_debounce[port] &= ~port_mask;
-
-	__raw_writeb(gpio_int_debounce[port],
-		EP93XX_GPIO_REG(int_debounce_register_offset[port]));
-}
-
-static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
-	unsigned char status;
-	int i;
-
-	status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
-	for (i = 0; i < 8; i++) {
-		if (status & (1 << i)) {
-			int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
-			generic_handle_irq(gpio_irq);
-		}
-	}
-
-	status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
-	for (i = 0; i < 8; i++) {
-		if (status & (1 << i)) {
-			int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
-			generic_handle_irq(gpio_irq);
-		}
-	}
-}
-
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
-	/*
-	 * map discontiguous hw irq range to continuous sw irq range:
-	 *
-	 *  IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
-	 */
-	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
-	int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
-
-	generic_handle_irq(gpio_irq);
-}
-
-static void ep93xx_gpio_irq_ack(struct irq_data *d)
-{
-	int line = irq_to_gpio(d->irq);
-	int port = line >> 3;
-	int port_mask = 1 << (line & 7);
-
-	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
-		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-		ep93xx_gpio_update_int_params(port);
-	}
-
-	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
-}
-
-static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
-{
-	int line = irq_to_gpio(d->irq);
-	int port = line >> 3;
-	int port_mask = 1 << (line & 7);
-
-	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
-		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-
-	gpio_int_unmasked[port] &= ~port_mask;
-	ep93xx_gpio_update_int_params(port);
-
-	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
-}
-
-static void ep93xx_gpio_irq_mask(struct irq_data *d)
-{
-	int line = irq_to_gpio(d->irq);
-	int port = line >> 3;
-
-	gpio_int_unmasked[port] &= ~(1 << (line & 7));
-	ep93xx_gpio_update_int_params(port);
-}
-
-static void ep93xx_gpio_irq_unmask(struct irq_data *d)
-{
-	int line = irq_to_gpio(d->irq);
-	int port = line >> 3;
-
-	gpio_int_unmasked[port] |= 1 << (line & 7);
-	ep93xx_gpio_update_int_params(port);
-}
-
-/*
- * gpio_int_type1 controls whether the interrupt is level (0) or
- * edge (1) triggered, while gpio_int_type2 controls whether it
- * triggers on low/falling (0) or high/rising (1).
- */
-static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
-{
-	const int gpio = irq_to_gpio(d->irq);
-	const int port = gpio >> 3;
-	const int port_mask = 1 << (gpio & 7);
-	irq_flow_handler_t handler;
-
-	gpio_direction_input(gpio);
-
-	switch (type) {
-	case IRQ_TYPE_EDGE_RISING:
-		gpio_int_type1[port] |= port_mask;
-		gpio_int_type2[port] |= port_mask;
-		handler = handle_edge_irq;
-		break;
-	case IRQ_TYPE_EDGE_FALLING:
-		gpio_int_type1[port] |= port_mask;
-		gpio_int_type2[port] &= ~port_mask;
-		handler = handle_edge_irq;
-		break;
-	case IRQ_TYPE_LEVEL_HIGH:
-		gpio_int_type1[port] &= ~port_mask;
-		gpio_int_type2[port] |= port_mask;
-		handler = handle_level_irq;
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-		gpio_int_type1[port] &= ~port_mask;
-		gpio_int_type2[port] &= ~port_mask;
-		handler = handle_level_irq;
-		break;
-	case IRQ_TYPE_EDGE_BOTH:
-		gpio_int_type1[port] |= port_mask;
-		/* set initial polarity based on current input level */
-		if (gpio_get_value(gpio))
-			gpio_int_type2[port] &= ~port_mask; /* falling */
-		else
-			gpio_int_type2[port] |= port_mask; /* rising */
-		handler = handle_edge_irq;
-		break;
-	default:
-		pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
-		return -EINVAL;
-	}
-
-	__irq_set_handler_locked(d->irq, handler);
-
-	gpio_int_enabled[port] |= port_mask;
-
-	ep93xx_gpio_update_int_params(port);
-
-	return 0;
-}
-
-static struct irq_chip ep93xx_gpio_irq_chip = {
-	.name		= "GPIO",
-	.irq_ack	= ep93xx_gpio_irq_ack,
-	.irq_mask_ack	= ep93xx_gpio_irq_mask_ack,
-	.irq_mask	= ep93xx_gpio_irq_mask,
-	.irq_unmask	= ep93xx_gpio_irq_unmask,
-	.irq_set_type	= ep93xx_gpio_irq_type,
-};
-
-void __init ep93xx_gpio_init_irq(void)
-{
-	int gpio_irq;
-
-	for (gpio_irq = gpio_to_irq(0);
-	     gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
-		irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
-					 handle_level_irq);
-		set_irq_flags(gpio_irq, IRQF_VALID);
-	}
-
-	irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
-				ep93xx_gpio_ab_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
-				ep93xx_gpio_f_irq_handler);
-	irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
-				ep93xx_gpio_f_irq_handler);
-}
-
-
-/*************************************************************************
- * gpiolib interface for EP93xx on-chip GPIOs
- *************************************************************************/
-struct ep93xx_gpio_chip {
-	struct gpio_chip	chip;
-
-	void __iomem		*data_reg;
-	void __iomem		*data_dir_reg;
-};
-
-#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip)
-
-static int ep93xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
-	unsigned long flags;
-	u8 v;
-
-	local_irq_save(flags);
-	v = __raw_readb(ep93xx_chip->data_dir_reg);
-	v &= ~(1 << offset);
-	__raw_writeb(v, ep93xx_chip->data_dir_reg);
-	local_irq_restore(flags);
-
-	return 0;
-}
-
-static int ep93xx_gpio_direction_output(struct gpio_chip *chip,
-					unsigned offset, int val)
-{
-	struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
-	unsigned long flags;
-	int line;
-	u8 v;
-
-	local_irq_save(flags);
-
-	/* Set the value */
-	v = __raw_readb(ep93xx_chip->data_reg);
-	if (val)
-		v |= (1 << offset);
-	else
-		v &= ~(1 << offset);
-	__raw_writeb(v, ep93xx_chip->data_reg);
-
-	/* Drive as an output */
-	line = chip->base + offset;
-	if (line <= EP93XX_GPIO_LINE_MAX_IRQ) {
-		/* Ports A/B/F */
-		ep93xx_gpio_int_mask(line);
-		ep93xx_gpio_update_int_params(line >> 3);
-	}
-
-	v = __raw_readb(ep93xx_chip->data_dir_reg);
-	v |= (1 << offset);
-	__raw_writeb(v, ep93xx_chip->data_dir_reg);
-
-	local_irq_restore(flags);
-
-	return 0;
-}
-
-static int ep93xx_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-	struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
-
-	return !!(__raw_readb(ep93xx_chip->data_reg) & (1 << offset));
-}
-
-static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
-	struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
-	unsigned long flags;
-	u8 v;
-
-	local_irq_save(flags);
-	v = __raw_readb(ep93xx_chip->data_reg);
-	if (val)
-		v |= (1 << offset);
-	else
-		v &= ~(1 << offset);
-	__raw_writeb(v, ep93xx_chip->data_reg);
-	local_irq_restore(flags);
-}
-
-static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
-				    unsigned offset, unsigned debounce)
-{
-	int gpio = chip->base + offset;
-	int irq = gpio_to_irq(gpio);
-
-	if (irq < 0)
-		return -EINVAL;
-
-	ep93xx_gpio_int_debounce(irq, debounce ? true : false);
-
-	return 0;
-}
-
-#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio)			\
-	{								\
-		.chip = {						\
-			.label		  = name,			\
-			.direction_input  = ep93xx_gpio_direction_input, \
-			.direction_output = ep93xx_gpio_direction_output, \
-			.get		  = ep93xx_gpio_get,		\
-			.set		  = ep93xx_gpio_set,		\
-			.base		  = base_gpio,			\
-			.ngpio		  = 8,				\
-		},							\
-		.data_reg	= EP93XX_GPIO_REG(dr),			\
-		.data_dir_reg	= EP93XX_GPIO_REG(ddr),			\
-	}
-
-static struct ep93xx_gpio_chip ep93xx_gpio_banks[] = {
-	EP93XX_GPIO_BANK("A", 0x00, 0x10, 0),
-	EP93XX_GPIO_BANK("B", 0x04, 0x14, 8),
-	EP93XX_GPIO_BANK("C", 0x08, 0x18, 40),
-	EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24),
-	EP93XX_GPIO_BANK("E", 0x20, 0x24, 32),
-	EP93XX_GPIO_BANK("F", 0x30, 0x34, 16),
-	EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48),
-	EP93XX_GPIO_BANK("H", 0x40, 0x44, 56),
-};
-
-void __init ep93xx_gpio_init(void)
-{
-	int i;
-
-	/* Set Ports C, D, E, G, and H for GPIO use */
-	ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
-			       EP93XX_SYSCON_DEVCFG_GONK |
-			       EP93XX_SYSCON_DEVCFG_EONIDE |
-			       EP93XX_SYSCON_DEVCFG_GONIDE |
-			       EP93XX_SYSCON_DEVCFG_HONIDE);
-
-	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
-		struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
-
-		/*
-		 * Ports A, B, and F support input debouncing when
-		 * used as interrupts.
-		 */
-		if (!strcmp(chip->label, "A") ||
-		    !strcmp(chip->label, "B") ||
-		    !strcmp(chip->label, "F"))
-			chip->set_debounce = ep93xx_gpio_set_debounce;
-
-		gpiochip_add(chip);
-	}
-}
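With gpio.c gone from the arch directory, the GPIO block is owned by the "gpio-ep93xx" platform driver (outside this diff), bound through the device and MEM resource that core.c now registers. The probe side implied by that device follows the standard platform pattern; a sketch under that assumption, not the actual driver:

static int example_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* EP93XX_GPIO_PHYS_BASE .. +0xcc-1, per ep93xx_gpio_resource[] */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* ioremap the register window and register the gpio_chip banks */
	return 0;
}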
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da..46d4d876e6f 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,149 +1,93 @@
-/**
- * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
- *
- * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
- * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
- * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
- * engine.
- *
- * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
- *
- */
-
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H
 
-#include <linux/list.h>
 #include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 
-/**
- * struct ep93xx_dma_buffer - Information about a buffer to be transferred
- * using the DMA M2P engine
+/*
+ * M2P channels.
 *
- * @list: Entry in DMA buffer list
- * @bus_addr: Physical address of the buffer
- * @size: Size of the buffer in bytes
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
 */
-struct ep93xx_dma_buffer {
-	struct list_head	list;
-	u32			bus_addr;
-	u16			size;
-};
+#define EP93XX_DMA_I2S1		0
+#define EP93XX_DMA_I2S2		1
+#define EP93XX_DMA_AAC1		2
+#define EP93XX_DMA_AAC2		3
+#define EP93XX_DMA_AAC3		4
+#define EP93XX_DMA_I2S3		5
+#define EP93XX_DMA_UART1	6
+#define EP93XX_DMA_UART2	7
+#define EP93XX_DMA_UART3	8
+#define EP93XX_DMA_IRDA		9
+/* M2M channels */
+#define EP93XX_DMA_SSP		10
+#define EP93XX_DMA_IDE		11
 
 /**
- * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
- *
- * @name: Unique name for this client
- * @flags: Client flags
- * @cookie: User data to pass to callback functions
- * @buffer_started: Non NULL function to call when a transfer is started.
- *		    The arguments are the user data cookie and the DMA
- *		    buffer which is starting.
- * @buffer_finished: Non NULL function to call when a transfer is completed.
- *		     The arguments are the user data cookie, the DMA buffer
- *		     which has completed, and a boolean flag indicating if
- *		     the transfer had an error.
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel, this is displayed in /proc/interrupts
+ *
+ * This information is passed as private channel parameter in a filter
+ * function. Note that this is only needed for slave/cyclic channels. For
+ * memcpy channels %NULL data should be passed.
 */
-struct ep93xx_dma_m2p_client {
-	char	*name;
-	u8	flags;
-	void	*cookie;
-	void	(*buffer_started)(void *cookie,
-			struct ep93xx_dma_buffer *buf);
-	void	(*buffer_finished)(void *cookie,
-			struct ep93xx_dma_buffer *buf,
-			int bytes, int error);
-
-	/* private: Internal use only */
-	void	*channel;
+struct ep93xx_dma_data {
+	int			port;
+	enum dma_data_direction	direction;
+	const char		*name;
 };
 
-/* DMA M2P ports */
-#define EP93XX_DMA_M2P_PORT_I2S1	0x00
-#define EP93XX_DMA_M2P_PORT_I2S2	0x01
-#define EP93XX_DMA_M2P_PORT_AAC1	0x02
-#define EP93XX_DMA_M2P_PORT_AAC2	0x03
-#define EP93XX_DMA_M2P_PORT_AAC3	0x04
-#define EP93XX_DMA_M2P_PORT_I2S3	0x05
-#define EP93XX_DMA_M2P_PORT_UART1	0x06
-#define EP93XX_DMA_M2P_PORT_UART2	0x07
-#define EP93XX_DMA_M2P_PORT_UART3	0x08
-#define EP93XX_DMA_M2P_PORT_IRDA	0x09
-#define EP93XX_DMA_M2P_PORT_MASK	0x0f
-
-/* DMA M2P client flags */
-#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
-#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
-
-/*
- * DMA M2P client error handling flags. See the EP93xx users guide
- * documentation on the DMA M2P CONTROL register for more details
- */
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
-
 /**
- * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
- * subsystem
- *
- * @m2p: Client information to register
- * returns 0 on success
- *
- * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
- * client
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used for getting the right clock for the channel
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
 */
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_chan_data {
+	const char	*name;
+	void __iomem	*base;
+	int		irq;
+};
 
 /**
- * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
- * subsystem
- *
- * @m2p: Client to unregister
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
 *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, contract is that even channels are for TX and odd for RX.
+ * There is no requirement for the M2M channels.
 */
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_platform_data {
+	struct ep93xx_dma_chan_data	*channels;
+	size_t				num_channels;
+};
 
-/**
- * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
- *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * If the current or next transfer positions are free on the M2P client then
- * the transfer is started immediately. If not, the transfer is added to the
- * list of pending transfers. This function must not be called from the
- * buffer_finished callback for an M2P channel.
- *
- */
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
-			   struct ep93xx_dma_buffer *buf);
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
 
 /**
- * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
- * for an M2P channel
+ * ep93xx_dma_chan_direction - returns direction the channel can be used
+ * @chan: channel
 *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * This function must only be called from the buffer_finished callback for an
- * M2P channel. It is commonly used to add the next transfer in a chained list
- * of DMA transfers.
+ * This function can be used in filter functions to find out whether the
+ * channel supports given DMA direction. Only M2P channels have such
+ * limitation, for M2M channels the direction is configurable.
 */
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
-				     struct ep93xx_dma_buffer *buf);
+static inline enum dma_data_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+	if (!ep93xx_dma_chan_is_m2p(chan))
+		return DMA_NONE;
 
-/**
- * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
- *
- * @m2p: DMA client to flush transfers on
- *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
- *
- */
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
+	/* even channels are for TX, odd for RX */
+	return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
 
 #endif /* __ASM_ARCH_DMA_H */
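The new header drops the bespoke M2P client API in favour of plain dmaengine: a driver now requests a channel with a filter function and hands over a struct ep93xx_dma_data as the private parameter, exactly as the kernel-doc above describes. A sketch of that usage (driver-side names are illustrative; only the types and inline helpers come from this header):

static bool example_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* M2P channels are direction-limited; M2M ones are not. */
	if (ep93xx_dma_chan_is_m2p(chan) &&
	    data->direction != ep93xx_dma_chan_direction(chan))
		return false;

	chan->private = data;
	return true;
}

static struct dma_chan *example_request_rx_chan(struct ep93xx_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	data->direction = DMA_FROM_DEVICE;
	return dma_request_channel(mask, example_dma_filter, data);
}

Boards opt the SPI master into this path through the new use_dma flag in struct ep93xx_spi_info, added in the ep93xx_spi.h hunk below.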
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 9ac4d105509..c4a7b84ef06 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -98,6 +98,7 @@
 
 #define EP93XX_SECURITY_BASE		EP93XX_APB_IOMEM(0x00030000)
 
+#define EP93XX_GPIO_PHYS_BASE		EP93XX_APB_PHYS(0x00040000)
 #define EP93XX_GPIO_BASE		EP93XX_APB_IOMEM(0x00040000)
 #define EP93XX_GPIO_REG(x)		(EP93XX_GPIO_BASE + (x))
 #define EP93XX_GPIO_F_INT_STATUS	EP93XX_GPIO_REG(0x5c)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b345..9bb63ac13f0 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -7,9 +7,11 @@ struct spi_device;
 * struct ep93xx_spi_info - EP93xx specific SPI descriptor
 * @num_chipselect: number of chip selects on this board, must be
 *                  at least one
+ * @use_dma: use DMA for the transfers
 */
 struct ep93xx_spi_info {
 	int	num_chipselect;
+	bool	use_dma;
 };
 
 /**
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index a65838fc061..af1c580b06b 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = {
 	_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
 	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
 	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-	_REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk)
-	_REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk)
-	_REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk)
+	/* i.mx25 has the i.mx35 type cspi */
+	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
+	_REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
+	_REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
 	_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
 	_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
 	_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
index 236f1495efa..f8aa5be0eb1 100644
--- a/arch/arm/mach-imx/dma-v1.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 15e45c84e37..59d2a3b137d 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -115,6 +115,8 @@ static struct platform_device *devices[] __initdata = {
 
 static void __init apf9328_init(void)
 {
+	imx1_soc_init();
+
 	mxc_gpio_setup_multiple_pins(apf9328_pins,
 			ARRAY_SIZE(apf9328_pins),
 			"APF9328");
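From the apf9328 hunk to the end of this section, the i.MX board files all gain the same first statement: an imxXX_soc_init() call at the top of the machine init hook. The hook itself is defined outside this diff; what every hunk guarantees is the ordering - SoC-level setup runs before any board pin muxing or device registration. The repeated pattern:

/* Shape of every i.MX board change below (board names vary): */
static void __init example_board_init(void)
{
	imx27_soc_init();	/* or imx1/21/25/31/35_soc_init() */

	/* existing pin mux + device registration follows unchanged */
}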
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index ffb40ff619b..ede2710f8b7 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -490,6 +490,8 @@ static struct platform_device *devices[] __initdata = {
 */
 static void __init armadillo5x0_init(void)
 {
+	imx31_soc_init();
+
 	mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
 			ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 42e4f078a19..f49470553bd 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -42,6 +42,8 @@ static const unsigned int bug_pins[] __initconst = {
 
 static void __init bug_board_init(void)
 {
+	imx31_soc_init();
+
 	mxc_iomux_setup_multiple_pins(bug_pins,
 				      ARRAY_SIZE(bug_pins), "uart-4");
 	imx31_add_imx_uart4(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 46a2e41d43d..87887ac5806 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -250,6 +250,8 @@ __setup("otg_mode=", eukrea_cpuimx27_otg_mode);
 
 static void __init eukrea_cpuimx27_init(void)
 {
+	imx27_soc_init();
+
 	mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins,
 		ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27");
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 3f8ef825fa6..f39a478ba1a 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -156,6 +156,8 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode);
 */
 static void __init eukrea_cpuimx35_init(void)
 {
+	imx35_soc_init();
+
 	mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads,
 			ARRAY_SIZE(eukrea_cpuimx35_pads));
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index 148cff2819b..da36da52969 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -125,6 +125,8 @@ __setup("otg_mode=", eukrea_cpuimx25_otg_mode);
 
 static void __init eukrea_cpuimx25_init(void)
 {
+	imx25_soc_init();
+
 	if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads,
 			ARRAY_SIZE(eukrea_cpuimx25_pads)))
 		printk(KERN_ERR "error setting cpuimx25 pads !\n");
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 7ae43b1ec51..c6269d60ddb 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -231,6 +231,8 @@ static void __init visstrim_m10_board_init(void)
 {
 	int ret;
 
+	imx27_soc_init();
+
 	ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins,
 			ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10");
 	if (ret)
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 9be6cd6fbf8..272f793e924 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -50,6 +50,8 @@ static const int mx27ipcam_pins[] __initconst = {
 
 static void __init mx27ipcam_init(void)
 {
+	imx27_soc_init();
+
 	mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins),
				     "mx27ipcam");
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 841140516ed..d81a769fe89 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -59,6 +59,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
 
 static void __init mx27lite_init(void)
 {
+	imx27_soc_init();
+
 	mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins),
				     "imx27lite");
 	imx27_add_imx_uart0(&uart_pdata);
imx27_add_imx_uart0(&uart_pdata); diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c index 1ecae20cf4e..e472a1d8805 100644 --- a/arch/arm/mach-imx/mach-kzm_arm11_01.c +++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c @@ -223,6 +223,8 @@ static int kzm_pins[] __initdata = { */ static void __init kzm_board_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(kzm_pins, ARRAY_SIZE(kzm_pins), "kzm"); kzm_init_ext_uart(); diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c index 38ec5cbbda9..5cd8bee4696 100644 --- a/arch/arm/mach-imx/mach-mx1ads.c +++ b/arch/arm/mach-imx/mach-mx1ads.c @@ -115,6 +115,8 @@ static struct i2c_board_info mx1ads_i2c_devices[] = { */ static void __init mx1ads_init(void) { + imx1_soc_init(); + mxc_gpio_setup_multiple_pins(mx1ads_pins, ARRAY_SIZE(mx1ads_pins), "mx1ads"); diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 74ac88978dd..d389ecf9b5a 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -279,6 +279,8 @@ static struct platform_device *platform_devices[] __initdata = { static void __init mx21ads_board_init(void) { + imx21_soc_init(); + mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins), "mx21ads"); diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index 58ea3fdf091..01534bb6130 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -219,6 +219,8 @@ static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = { static void __init mx25pdk_init(void) { + imx25_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads, ARRAY_SIZE(mx25pdk_pads)); diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c index 6e1accf93f8..117ce0a50f4 100644 --- a/arch/arm/mach-imx/mach-mx27_3ds.c +++ b/arch/arm/mach-imx/mach-mx27_3ds.c @@ -267,6 +267,8 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = { static void __init mx27pdk_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins), "mx27pdk"); mx27_3ds_sdhc1_enable_level_translator(); diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index 1db79506f5e..fc26ed71b9e 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -288,6 +288,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = { static void __init mx27ads_board_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins), "mx27ads"); diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c index 9b982449cb5..441fbb83f39 100644 --- a/arch/arm/mach-imx/mach-mx31_3ds.c +++ b/arch/arm/mach-imx/mach-mx31_3ds.c @@ -13,6 +13,7 @@ */ #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/init.h> #include <linux/clk.h> @@ -689,6 +690,8 @@ static void __init mx31_3ds_init(void) { int ret; + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), "mx31_3ds"); diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index f4dee025463..0ce49478a47 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -516,6 +516,8 @@ static void __init mx31ads_init_irq(void) static void __init mx31ads_init(void) { + imx31_soc_init(); + mxc_init_extuart(); mxc_init_imx_uart(); 
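
Every board file in this series gains the same opening statement: the imxXX_soc_init() call that now registers the SoC's GPIO banks as platform devices. The ordering matters, since anything that claims a GPIO must come after it. A sketch for a hypothetical i.MX27 board, with the pin table and GPIO number as placeholders:

#include <linux/gpio.h>
#include <mach/common.h>

#define MYBOARD_LED_GPIO	(GPIO_PORTB + 14)	/* illustrative */

static const int myboard_pins[] __initconst = {
	/* pad mux entries elided */
};

static void __init myboard_init(void)
{
	/* Register SoC-internal devices (GPIO banks included) first. */
	imx27_soc_init();

	mxc_gpio_setup_multiple_pins(myboard_pins,
				     ARRAY_SIZE(myboard_pins), "myboard");

	/* Safe only after imx27_soc_init(): the gpio_chips exist now. */
	if (!gpio_request(MYBOARD_LED_GPIO, "led"))
		gpio_direction_output(MYBOARD_LED_GPIO, 0);

	imx27_add_imx_uart0(NULL);	/* no board-specific pdata */
}
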
mxc_init_i2c(); diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 410e676ae08..750368ddf0f 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c @@ -243,6 +243,8 @@ core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444); static void __init mx31lilly_board_init(void) { + imx31_soc_init(); + switch (mx31lilly_baseboard) { case MX31LILLY_NOBOARD: break; diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c index ac9b4cad320..4b47fd9fdd8 100644 --- a/arch/arm/mach-imx/mach-mx31lite.c +++ b/arch/arm/mach-imx/mach-mx31lite.c @@ -230,6 +230,8 @@ static void __init mx31lite_init(void) { int ret; + imx31_soc_init(); + switch (mx31lite_baseboard) { case MX31LITE_NOBOARD: break; diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index eaa51e49ca9..a52fd36e2b5 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -507,6 +507,8 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444); */ static void __init mx31moboard_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins), "moboard"); diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c index 882880ac1bb..48b3c6fd5cf 100644 --- a/arch/arm/mach-imx/mach-mx35_3ds.c +++ b/arch/arm/mach-imx/mach-mx35_3ds.c @@ -179,6 +179,8 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = { */ static void __init mx35_3ds_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads)); imx35_add_fec(NULL); diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c index 2774541511e..c85876fed66 100644 --- a/arch/arm/mach-imx/mach-mxt_td60.c +++ b/arch/arm/mach-imx/mach-mxt_td60.c @@ -233,6 +233,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = { static void __init mxt_td60_board_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins), "MXT_TD60"); diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index bbddc5a11c4..71083aa1603 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c @@ -357,6 +357,8 @@ static void __init pca100_init(void) { int ret; + imx27_soc_init(); + /* SSI unit */ mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */ diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c index 89c213b8129..f45b7cd72c8 100644 --- a/arch/arm/mach-imx/mach-pcm037.c +++ b/arch/arm/mach-imx/mach-pcm037.c @@ -576,6 +576,8 @@ static void __init pcm037_init(void) { int ret; + imx31_soc_init(); + mxc_iomux_set_gpr(MUX_PGP_UH2, 1); mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins), diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c index 853bb871c7e..2d6a64bbac4 100644 --- a/arch/arm/mach-imx/mach-pcm038.c +++ b/arch/arm/mach-imx/mach-pcm038.c @@ -295,6 +295,8 @@ static const struct mxc_usbh_platform_data usbh2_pdata __initconst = { static void __init pcm038_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins), "PCM038"); diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index 026441628df..163cc318caf 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -356,6 
+356,8 @@ static struct esdhc_platform_data sd1_pdata = { */ static void __init pcm043_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads)); mxc_audmux_v2_configure_port(3, diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c index c1632871593..3626f486498 100644 --- a/arch/arm/mach-imx/mach-qong.c +++ b/arch/arm/mach-imx/mach-qong.c @@ -244,6 +244,8 @@ static void __init qong_init_fpga(void) */ static void __init qong_init(void) { + imx31_soc_init(); + mxc_init_imx_uart(); qong_init_nor_mtd(); qong_init_fpga(); diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c index dcaee043628..82805260e19 100644 --- a/arch/arm/mach-imx/mach-scb9328.c +++ b/arch/arm/mach-imx/mach-scb9328.c @@ -129,6 +129,8 @@ static struct platform_device *devices[] __initdata = { */ static void __init scb9328_init(void) { + imx1_soc_init(); + imx1_add_imx_uart0(&uart_pdata); printk(KERN_INFO"Scb9328: Adding devices\n"); diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c index d74e3473d23..7d8e012a633 100644 --- a/arch/arm/mach-imx/mach-vpr200.c +++ b/arch/arm/mach-imx/mach-vpr200.c @@ -267,6 +267,8 @@ static struct platform_device *devices[] __initdata = { */ static void __init vpr200_board_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads)); imx35_add_fec(NULL); diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c index 2e482ba5a0e..2bded591d5c 100644 --- a/arch/arm/mach-imx/mm-imx1.c +++ b/arch/arm/mach-imx/mm-imx1.c @@ -23,7 +23,6 @@ #include <mach/common.h> #include <mach/hardware.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -44,15 +43,19 @@ void __init imx1_init_early(void) MX1_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx1_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 0, 1, MX1_GPIO_INT_PORTA), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 1, 2, MX1_GPIO_INT_PORTB), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 2, 3, MX1_GPIO_INT_PORTC), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 3, 4, MX1_GPIO_INT_PORTD), -}; - void __init mx1_init_irq(void) { mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR)); - mxc_gpio_init(imx1_gpio_ports, ARRAY_SIZE(imx1_gpio_ports)); +} + +void __init imx1_soc_init(void) +{ + mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTA, 0); + mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTB, 0); + mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTC, 0); + mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTD, 0); } diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c index 7a0c500ac2c..6d7d518686a 100644 --- a/arch/arm/mach-imx/mm-imx21.c +++ b/arch/arm/mach-imx/mm-imx21.c @@ -24,7 +24,6 @@ #include <mach/common.h> #include <asm/pgtable.h> #include <asm/mach/map.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -70,17 +69,17 @@ void __init imx21_init_early(void) MX21_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx21_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX21, 0, 1, MX21_INT_GPIO), - DEFINE_IMX_GPIO_PORT(MX21, 1, 2), - DEFINE_IMX_GPIO_PORT(MX21, 2, 3), - DEFINE_IMX_GPIO_PORT(MX21, 3, 4), - DEFINE_IMX_GPIO_PORT(MX21, 4, 5), - DEFINE_IMX_GPIO_PORT(MX21, 5, 6), -}; - void __init mx21_init_irq(void) { mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR)); - mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports)); 
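
mxc_register_gpio() itself is not part of these hunks; from the call sites its signature is (name, id, physical base, size, irq, irq_high), with irq_high used on SoCs whose banks split their interrupt in two and 0 otherwise. A sketch of what the helper plausibly does, reconstructed from usage only (the real implementation lives in arch/arm/plat-mxc):

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

struct platform_device *__init mxc_register_gpio(char *name, int id,
	resource_size_t iobase, resource_size_t iosize, int irq, int irq_high)
{
	struct resource res[] = {
		{
			.start	= iobase,
			.end	= iobase + iosize - 1,
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= irq,
			.end	= irq,
			.flags	= IORESOURCE_IRQ,
		}, {
			.start	= irq_high,
			.end	= irq_high,
			.flags	= IORESOURCE_IRQ,
		},
	};

	/* Hand the bank to the gpio-mxc platform driver as MEM + IRQ(s). */
	return platform_device_register_resndata(&platform_bus, name, id,
			res, irq_high ? 3 : 2, NULL, 0);
}
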
+} + +void __init imx21_soc_init(void) +{ + mxc_register_gpio("imx21-gpio", 0, MX21_GPIO1_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 1, MX21_GPIO2_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 2, MX21_GPIO3_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 3, MX21_GPIO4_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 4, MX21_GPIO5_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); } diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c index 02f7b5c7fa8..9a1591c2508 100644 --- a/arch/arm/mach-imx/mm-imx25.c +++ b/arch/arm/mach-imx/mm-imx25.c @@ -27,7 +27,6 @@ #include <mach/hardware.h> #include <mach/mx25.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> /* @@ -57,16 +56,16 @@ void __init imx25_init_early(void) mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx25_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 0, 1, MX25_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 1, 2, MX25_INT_GPIO2), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 2, 3, MX25_INT_GPIO3), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 3, 4, MX25_INT_GPIO4), -}; - void __init mx25_init_irq(void) { mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR)); - mxc_gpio_init(imx25_gpio_ports, ARRAY_SIZE(imx25_gpio_ports)); } +void __init imx25_soc_init(void) +{ + /* i.mx25 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0); + mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0); +} diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c index a6761a39f08..133b30003dd 100644 --- a/arch/arm/mach-imx/mm-imx27.c +++ b/arch/arm/mach-imx/mm-imx27.c @@ -24,7 +24,6 @@ #include <mach/common.h> #include <asm/pgtable.h> #include <asm/mach/map.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -70,17 +69,18 @@ void __init imx27_init_early(void) MX27_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx27_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX27, 0, 1, MX27_INT_GPIO), - DEFINE_IMX_GPIO_PORT(MX27, 1, 2), - DEFINE_IMX_GPIO_PORT(MX27, 2, 3), - DEFINE_IMX_GPIO_PORT(MX27, 3, 4), - DEFINE_IMX_GPIO_PORT(MX27, 4, 5), - DEFINE_IMX_GPIO_PORT(MX27, 5, 6), -}; - void __init mx27_init_irq(void) { mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR)); - mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports)); +} + +void __init imx27_soc_init(void) +{ + /* i.mx27 has the i.mx21 type gpio */ + mxc_register_gpio("imx21-gpio", 0, MX27_GPIO1_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 1, MX27_GPIO2_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 2, MX27_GPIO3_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 3, MX27_GPIO4_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 4, MX27_GPIO5_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); } diff --git a/arch/arm/mach-imx/mm-imx31.c b/arch/arm/mach-imx/mm-imx31.c index 86b9b45864d..6d103c01b8b 100644 --- a/arch/arm/mach-imx/mm-imx31.c +++ b/arch/arm/mach-imx/mm-imx31.c @@ -26,7 +26,6 @@ #include 
<mach/common.h> #include <mach/hardware.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> static struct map_desc mx31_io_desc[] __initdata = { @@ -53,14 +52,14 @@ void __init imx31_init_early(void) mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx31_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 0, 1, MX31_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 1, 2, MX31_INT_GPIO2), - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 2, 3, MX31_INT_GPIO3), -}; - void __init mx31_init_irq(void) { mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); - mxc_gpio_init(imx31_gpio_ports, ARRAY_SIZE(imx31_gpio_ports)); +} + +void __init imx31_soc_init(void) +{ + mxc_register_gpio("imx31-gpio", 0, MX31_GPIO1_BASE_ADDR, SZ_16K, MX31_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0); } diff --git a/arch/arm/mach-imx/mm-imx35.c b/arch/arm/mach-imx/mm-imx35.c index c880e6d1ae5..bb068bc8dab 100644 --- a/arch/arm/mach-imx/mm-imx35.c +++ b/arch/arm/mach-imx/mm-imx35.c @@ -27,7 +27,6 @@ #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> static struct map_desc mx35_io_desc[] __initdata = { @@ -50,14 +49,15 @@ void __init imx35_init_early(void) mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx35_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX35, 0, 1, MX35_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX35, 1, 2, MX35_INT_GPIO2), - DEFINE_IMX_GPIO_PORT_IRQ(MX35, 2, 3, MX35_INT_GPIO3), -}; - void __init mx35_init_irq(void) { mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR)); - mxc_gpio_init(imx35_gpio_ports, ARRAY_SIZE(imx35_gpio_ports)); +} + +void __init imx35_soc_init(void) +{ + /* i.mx35 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0); } diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 5c147fb66a0..a5b989728b9 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -17,6 +17,7 @@ * */ +#include <linux/dma-mapping.h> #include <linux/serial_8250.h> #include <linux/io.h> #ifdef CONFIG_MTD_PHYSMAP diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index 7bb78fd5a2a..c79162a50f2 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -177,9 +177,16 @@ static struct i2c_board_info brownstone_twsi1_info[] = { }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { - .max_speed = 25000000, + .clk_delay_cycles = 0x1f, }; +static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = { + .clk_delay_cycles = 0x1f, + .flags = PXA_FLAG_CARD_PERMANENT + | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT, +}; + + static void __init brownstone_init(void) { mfp_config(ARRAY_AND_SIZE(brownstone_pin_config)); @@ -189,6 +196,7 @@ static void __init brownstone_init(void) mmp2_add_uart(3); mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ + mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */ /* enable 5v regulator */ platform_device_register(&brownstone_v_5vp_device); diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h 
b/arch/arm/mach-mmp/include/mach/mmp2.h index 2cbf6df09b8..de7b88826ad 100644 --- a/arch/arm/mach-mmp/include/mach/mmp2.h +++ b/arch/arm/mach-mmp/include/mach/mmp2.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_MMP2_H #define __ASM_MACH_MMP2_H -#include <plat/sdhci.h> +#include <linux/platform_data/pxa_sdhci.h> struct sys_timer; diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c index 24172a0aad5..5d6421d6325 100644 --- a/arch/arm/mach-mmp/jasper.c +++ b/arch/arm/mach-mmp/jasper.c @@ -154,7 +154,7 @@ static struct i2c_board_info jasper_twsi1_info[] = { }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { - .max_speed = 25000000, + .clk_delay_cycles = 0x1f, }; static void __init jasper_init(void) diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c index 8e6c3ac7f7c..079c18861d5 100644 --- a/arch/arm/mach-mmp/mmp2.c +++ b/arch/arm/mach-mmp/mmp2.c @@ -168,10 +168,10 @@ static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), - INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"), }; static int __init mmp2_init(void) @@ -222,8 +222,8 @@ MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); -MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120); -MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120); -MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120); -MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120); +MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); +MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); +MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); +MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 1516896e8d1..888e92502e1 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3 endmenu -config MSM_IOMMU - bool "MSM IOMMU Support" - depends on ARCH_MSM8X60 || ARCH_MSM8960 - select IOMMU_API - default n - help - Support for the IOMMUs found on certain Qualcomm SOCs. - These IOMMUs allow virtualization of the address space used by most - cores within the multimedia subsystem. - - If unsure, say N here. 
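
The Brownstone and Jasper hunks above drop the raw max_speed field in favour of clk_delay_cycles plus per-slot capability flags. Under that scheme a board describes a soldered-down eMMC the way Brownstone does; a sketch for some other hypothetical MMP2 board (names are illustrative):

#include <linux/platform_data/pxa_sdhci.h>

/* Fixed eMMC: always present, 8-bit bus, tuned clock delay. */
static struct sdhci_pxa_platdata myboard_sdh_platdata_emmc = {
	.clk_delay_cycles	= 0x1f,
	.flags			= PXA_FLAG_CARD_PERMANENT
				| PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
};

static void __init myboard_init(void)
{
	mmp2_add_sdhost(2, &myboard_sdh_platdata_emmc);	/* eMMC */
}
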
- -config IOMMU_PGTABLES_L2 - def_bool y - depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n - config MSM_DEBUG_UART int default 1 if MSM_DEBUG_UART1 @@ -205,9 +189,6 @@ config MSM_GPIOMUX config MSM_V2_TLMM bool -config IOMMU_API - bool - config MSM_SCM bool endif diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index 9519fd28a02..b70658c5ae0 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -3,7 +3,7 @@ obj-y += clock.o obj-$(CONFIG_DEBUG_FS) += clock-debug.o obj-$(CONFIG_MSM_VIC) += irq-vic.o -obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o +obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o obj-$(CONFIG_ARCH_MSM7X30) += dma.o diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c deleted file mode 100644 index 1a584e077c6..00000000000 --- a/arch/arm/mach-msm/iommu.c +++ /dev/null @@ -1,731 +0,0 @@ -/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/errno.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/iommu.h> -#include <linux/clk.h> - -#include <asm/cacheflush.h> -#include <asm/sizes.h> - -#include <mach/iommu_hw-8xxx.h> -#include <mach/iommu.h> - -#define MRC(reg, processor, op1, crn, crm, op2) \ -__asm__ __volatile__ ( \ -" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ -: "=r" (reg)) - -#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) -#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) - -static int msm_iommu_tex_class[4]; - -DEFINE_SPINLOCK(msm_iommu_lock); - -struct msm_priv { - unsigned long *pgtable; - struct list_head list_attached; -}; - -static int __enable_clocks(struct msm_iommu_drvdata *drvdata) -{ - int ret; - - ret = clk_enable(drvdata->pclk); - if (ret) - goto fail; - - if (drvdata->clk) { - ret = clk_enable(drvdata->clk); - if (ret) - clk_disable(drvdata->pclk); - } -fail: - return ret; -} - -static void __disable_clocks(struct msm_iommu_drvdata *drvdata) -{ - if (drvdata->clk) - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); -} - -static int __flush_iotlb(struct iommu_domain *domain) -{ - struct msm_priv *priv = domain->priv; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - int ret = 0; -#ifndef CONFIG_IOMMU_PGTABLES_L2 - unsigned long *fl_table = priv->pgtable; - int i; - - if (!list_empty(&priv->list_attached)) { - dmac_flush_range(fl_table, fl_table + SZ_16K); - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { - void *sl_table = __va(fl_table[i] & - FL_BASE_MASK); - 
dmac_flush_range(sl_table, sl_table + SZ_4K); - } - } -#endif - - list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { - if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent) - BUG(); - - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); - BUG_ON(!iommu_drvdata); - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); - __disable_clocks(iommu_drvdata); - } -fail: - return ret; -} - -static void __reset_context(void __iomem *base, int ctx) -{ - SET_BPRCOSH(base, ctx, 0); - SET_BPRCISH(base, ctx, 0); - SET_BPRCNSH(base, ctx, 0); - SET_BPSHCFG(base, ctx, 0); - SET_BPMTCFG(base, ctx, 0); - SET_ACTLR(base, ctx, 0); - SET_SCTLR(base, ctx, 0); - SET_FSRRESTORE(base, ctx, 0); - SET_TTBR0(base, ctx, 0); - SET_TTBR1(base, ctx, 0); - SET_TTBCR(base, ctx, 0); - SET_BFBCR(base, ctx, 0); - SET_PAR(base, ctx, 0); - SET_FAR(base, ctx, 0); - SET_CTX_TLBIALL(base, ctx, 0); - SET_TLBFLPTER(base, ctx, 0); - SET_TLBSLPTER(base, ctx, 0); - SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); -} - -static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) -{ - unsigned int prrr, nmrr; - __reset_context(base, ctx); - - /* Set up HTW mode */ - /* TLB miss configuration: perform HTW on miss */ - SET_TLBMCFG(base, ctx, 0x3); - - /* V2P configuration: HTW for access */ - SET_V2PCFG(base, ctx, 0x3); - - SET_TTBCR(base, ctx, 0); - SET_TTBR0_PA(base, ctx, (pgtable >> 14)); - - /* Invalidate the TLB for this context */ - SET_CTX_TLBIALL(base, ctx, 0); - - /* Set interrupt number to "secure" interrupt */ - SET_IRPTNDX(base, ctx, 0); - - /* Enable context fault interrupt */ - SET_CFEIE(base, ctx, 1); - - /* Stall access on a context fault and let the handler deal with it */ - SET_CFCFG(base, ctx, 1); - - /* Redirect all cacheable requests to L2 slave port. */ - SET_RCISH(base, ctx, 1); - SET_RCOSH(base, ctx, 1); - SET_RCNSH(base, ctx, 1); - - /* Turn on TEX Remap */ - SET_TRE(base, ctx, 1); - - /* Set TEX remap attributes */ - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - SET_PRRR(base, ctx, prrr); - SET_NMRR(base, ctx, nmrr); - - /* Turn on BFB prefetch */ - SET_BFBDFE(base, ctx, 1); - -#ifdef CONFIG_IOMMU_PGTABLES_L2 - /* Configure page tables as inner-cacheable and shareable to reduce - * the TLB miss penalty. 
- */ - SET_TTBR0_SH(base, ctx, 1); - SET_TTBR1_SH(base, ctx, 1); - - SET_TTBR0_NOS(base, ctx, 1); - SET_TTBR1_NOS(base, ctx, 1); - - SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR0_IRGNL(base, ctx, 1); - - SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR1_IRGNL(base, ctx, 1); - - SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */ - SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */ -#endif - - /* Enable the MMU */ - SET_M(base, ctx, 1); -} - -static int msm_iommu_domain_init(struct iommu_domain *domain) -{ - struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); - - if (!priv) - goto fail_nomem; - - INIT_LIST_HEAD(&priv->list_attached); - priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, - get_order(SZ_16K)); - - if (!priv->pgtable) - goto fail_nomem; - - memset(priv->pgtable, 0, SZ_16K); - domain->priv = priv; - return 0; - -fail_nomem: - kfree(priv); - return -ENOMEM; -} - -static void msm_iommu_domain_destroy(struct iommu_domain *domain) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - int i; - - spin_lock_irqsave(&msm_iommu_lock, flags); - priv = domain->priv; - domain->priv = NULL; - - if (priv) { - fl_table = priv->pgtable; - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) - free_page((unsigned long) __va(((fl_table[i]) & - FL_BASE_MASK))); - - free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); - priv->pgtable = NULL; - } - - kfree(priv); - spin_unlock_irqrestore(&msm_iommu_lock, flags); -} - -static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) -{ - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - struct msm_iommu_ctx_drvdata *tmp_drvdata; - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - - if (!priv || !dev) { - ret = -EINVAL; - goto fail; - } - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) { - ret = -EINVAL; - goto fail; - } - - if (!list_empty(&ctx_drvdata->attached_elm)) { - ret = -EBUSY; - goto fail; - } - - list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) - if (tmp_drvdata == ctx_drvdata) { - ret = -EBUSY; - goto fail; - } - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - __program_context(iommu_drvdata->base, ctx_dev->num, - __pa(priv->pgtable)); - - __disable_clocks(iommu_drvdata); - list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); - ret = __flush_iotlb(domain); - -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static void msm_iommu_detach_dev(struct iommu_domain *domain, - struct device *dev) -{ - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - unsigned long flags; - int ret; - - spin_lock_irqsave(&msm_iommu_lock, flags); - priv = domain->priv; - - if (!priv || !dev) - goto fail; - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) - goto fail; - - ret = __flush_iotlb(domain); - if (ret) - goto fail; - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - __reset_context(iommu_drvdata->base, ctx_dev->num); - __disable_clocks(iommu_drvdata); - 
list_del_init(&ctx_drvdata->attached_elm); - -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); -} - -static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, - phys_addr_t pa, int order, int prot) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - unsigned int pgprot; - size_t len = 0x1000UL << order; - int ret = 0, tex, sh; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; - tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; - - if (tex < 0 || tex > NUM_TEX_CLASS - 1) { - ret = -EINVAL; - goto fail; - } - - priv = domain->priv; - if (!priv) { - ret = -EINVAL; - goto fail; - } - - fl_table = priv->pgtable; - - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad size: %d\n", len); - ret = -EINVAL; - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - ret = -EINVAL; - goto fail; - } - - if (len == SZ_16M || len == SZ_1M) { - pgprot = sh ? FL_SHARED : 0; - pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? FL_TEX0 : 0; - } else { - pgprot = sh ? SL_SHARED : 0; - pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? SL_TEX0 : 0; - } - - fl_offset = FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (len == SZ_16M) { - int i = 0; - for (i = 0; i < 16; i++) - *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | - FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | - FL_SHARED | FL_NG | pgprot; - } - - if (len == SZ_1M) - *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | - FL_TYPE_SECT | FL_SHARED | pgprot; - - /* Need a 2nd level table */ - if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { - unsigned long *sl; - sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, - get_order(SZ_4K)); - - if (!sl) { - pr_debug("Could not allocate second level table\n"); - ret = -ENOMEM; - goto fail; - } - - memset(sl, 0, SZ_4K); - *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); - } - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - - if (len == SZ_4K) - *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | - SL_SHARED | SL_TYPE_SMALL | pgprot; - - if (len == SZ_64K) { - int i; - - for (i = 0; i < 16; i++) - *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | - SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; - } - - ret = __flush_iotlb(domain); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, - int order) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - size_t len = 0x1000UL << order; - int i, ret = 0; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - - if (!priv) { - ret = -ENODEV; - goto fail; - } - - fl_table = priv->pgtable; - - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad length: %d\n", len); - ret = -EINVAL; - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - ret = -EINVAL; - goto fail; - } - - fl_offset 
= FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (*fl_pte == 0) { - pr_debug("First level PTE is 0\n"); - ret = -ENODEV; - goto fail; - } - - /* Unmap supersection */ - if (len == SZ_16M) - for (i = 0; i < 16; i++) - *(fl_pte+i) = 0; - - if (len == SZ_1M) - *fl_pte = 0; - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - if (len == SZ_64K) { - for (i = 0; i < 16; i++) - *(sl_pte+i) = 0; - } - - if (len == SZ_4K) - *sl_pte = 0; - - if (len == SZ_4K || len == SZ_64K) { - int used = 0; - - for (i = 0; i < NUM_SL_PTE; i++) - if (sl_table[i]) - used = 1; - if (!used) { - free_page((unsigned long)sl_table); - *fl_pte = 0; - } - } - - ret = __flush_iotlb(domain); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, - unsigned long va) -{ - struct msm_priv *priv; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - unsigned int par; - unsigned long flags; - void __iomem *base; - phys_addr_t ret = 0; - int ctx; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - if (list_empty(&priv->list_attached)) - goto fail; - - ctx_drvdata = list_entry(priv->list_attached.next, - struct msm_iommu_ctx_drvdata, attached_elm); - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); - - base = iommu_drvdata->base; - ctx = ctx_drvdata->num; - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - /* Invalidate context TLB */ - SET_CTX_TLBIALL(base, ctx, 0); - SET_V2PPR(base, ctx, va & V2Pxx_VA); - - par = GET_PAR(base, ctx); - - /* We are dealing with a supersection */ - if (GET_NOFAULT_SS(base, ctx)) - ret = (par & 0xFF000000) | (va & 0x00FFFFFF); - else /* Upper 20 bits from PAR, lower 12 from VA */ - ret = (par & 0xFFFFF000) | (va & 0x00000FFF); - - if (GET_FAULT(base, ctx)) - ret = 0; - - __disable_clocks(iommu_drvdata); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static int msm_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - return 0; -} - -static void print_ctx_regs(void __iomem *base, int ctx) -{ - unsigned int fsr = GET_FSR(base, ctx); - pr_err("FAR = %08x PAR = %08x\n", - GET_FAR(base, ctx), GET_PAR(base, ctx)); - pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr, - (fsr & 0x02) ? "TF " : "", - (fsr & 0x04) ? "AFF " : "", - (fsr & 0x08) ? "APF " : "", - (fsr & 0x10) ? "TLBMF " : "", - (fsr & 0x20) ? "HTWDEEF " : "", - (fsr & 0x40) ? "HTWSEEF " : "", - (fsr & 0x80) ? "MHF " : "", - (fsr & 0x10000) ? "SL " : "", - (fsr & 0x40000000) ? "SS " : "", - (fsr & 0x80000000) ? 
"MULTI " : ""); - - pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", - GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); - pr_err("TTBR0 = %08x TTBR1 = %08x\n", - GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); - pr_err("SCTLR = %08x ACTLR = %08x\n", - GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); - pr_err("PRRR = %08x NMRR = %08x\n", - GET_PRRR(base, ctx), GET_NMRR(base, ctx)); -} - -irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) -{ - struct msm_iommu_drvdata *drvdata = dev_id; - void __iomem *base; - unsigned int fsr; - int i, ret; - - spin_lock(&msm_iommu_lock); - - if (!drvdata) { - pr_err("Invalid device ID in context interrupt handler\n"); - goto fail; - } - - base = drvdata->base; - - pr_err("Unexpected IOMMU page fault!\n"); - pr_err("base = %08x\n", (unsigned int) base); - - ret = __enable_clocks(drvdata); - if (ret) - goto fail; - - for (i = 0; i < drvdata->ncb; i++) { - fsr = GET_FSR(base, i); - if (fsr) { - pr_err("Fault occurred in context %d.\n", i); - pr_err("Interesting registers:\n"); - print_ctx_regs(base, i); - SET_FSR(base, i, 0x4000000F); - } - } - __disable_clocks(drvdata); -fail: - spin_unlock(&msm_iommu_lock); - return 0; -} - -static struct iommu_ops msm_iommu_ops = { - .domain_init = msm_iommu_domain_init, - .domain_destroy = msm_iommu_domain_destroy, - .attach_dev = msm_iommu_attach_dev, - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, - .iova_to_phys = msm_iommu_iova_to_phys, - .domain_has_cap = msm_iommu_domain_has_cap -}; - -static int __init get_tex_class(int icp, int ocp, int mt, int nos) -{ - int i = 0; - unsigned int prrr = 0; - unsigned int nmrr = 0; - int c_icp, c_ocp, c_mt, c_nos; - - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - - for (i = 0; i < NUM_TEX_CLASS; i++) { - c_nos = PRRR_NOS(prrr, i); - c_mt = PRRR_MT(prrr, i); - c_icp = NMRR_ICP(nmrr, i); - c_ocp = NMRR_OCP(nmrr, i); - - if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) - return i; - } - - return -ENODEV; -} - -static void __init setup_iommu_tex_classes(void) -{ - msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = - get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = - get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = - get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = - get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); -} - -static int __init msm_iommu_init(void) -{ - setup_iommu_tex_classes(); - register_iommu(&msm_iommu_ops); - return 0; -} - -subsys_initcall(msm_iommu_init); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c deleted file mode 100644 index 8e8fb079852..00000000000 --- a/arch/arm/mach-msm/iommu_dev.c +++ /dev/null @@ -1,422 +0,0 @@ -/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/iommu.h> -#include <linux/interrupt.h> -#include <linux/err.h> -#include <linux/slab.h> - -#include <mach/iommu_hw-8xxx.h> -#include <mach/iommu.h> -#include <mach/clk.h> - -struct iommu_ctx_iter_data { - /* input */ - const char *name; - - /* output */ - struct device *dev; -}; - -static struct platform_device *msm_iommu_root_dev; - -static int each_iommu_ctx(struct device *dev, void *data) -{ - struct iommu_ctx_iter_data *res = data; - struct msm_iommu_ctx_dev *c = dev->platform_data; - - if (!res || !c || !c->name || !res->name) - return -EINVAL; - - if (!strcmp(res->name, c->name)) { - res->dev = dev; - return 1; - } - return 0; -} - -static int each_iommu(struct device *dev, void *data) -{ - return device_for_each_child(dev, data, each_iommu_ctx); -} - -struct device *msm_iommu_get_ctx(const char *ctx_name) -{ - struct iommu_ctx_iter_data r; - int found; - - if (!msm_iommu_root_dev) { - pr_err("No root IOMMU device.\n"); - goto fail; - } - - r.name = ctx_name; - found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu); - - if (!found) { - pr_err("Could not find context <%s>\n", ctx_name); - goto fail; - } - - return r.dev; -fail: - return NULL; -} -EXPORT_SYMBOL(msm_iommu_get_ctx); - -static void msm_iommu_reset(void __iomem *base, int ncb) -{ - int ctx; - - SET_RPUE(base, 0); - SET_RPUEIE(base, 0); - SET_ESRRESTORE(base, 0); - SET_TBE(base, 0); - SET_CR(base, 0); - SET_SPDMBE(base, 0); - SET_TESTBUSCR(base, 0); - SET_TLBRSW(base, 0); - SET_GLOBAL_TLBIALL(base, 0); - SET_RPU_ACR(base, 0); - SET_TLBLKCRWE(base, 1); - - for (ctx = 0; ctx < ncb; ctx++) { - SET_BPRCOSH(base, ctx, 0); - SET_BPRCISH(base, ctx, 0); - SET_BPRCNSH(base, ctx, 0); - SET_BPSHCFG(base, ctx, 0); - SET_BPMTCFG(base, ctx, 0); - SET_ACTLR(base, ctx, 0); - SET_SCTLR(base, ctx, 0); - SET_FSRRESTORE(base, ctx, 0); - SET_TTBR0(base, ctx, 0); - SET_TTBR1(base, ctx, 0); - SET_TTBCR(base, ctx, 0); - SET_BFBCR(base, ctx, 0); - SET_PAR(base, ctx, 0); - SET_FAR(base, ctx, 0); - SET_CTX_TLBIALL(base, ctx, 0); - SET_TLBFLPTER(base, ctx, 0); - SET_TLBSLPTER(base, ctx, 0); - SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); - SET_CONTEXTIDR(base, ctx, 0); - } -} - -static int msm_iommu_probe(struct platform_device *pdev) -{ - struct resource *r, *r2; - struct clk *iommu_clk; - struct clk *iommu_pclk; - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; - void __iomem *regs_base; - resource_size_t len; - int ret, irq, par; - - if (pdev->id == -1) { - msm_iommu_root_dev = pdev; - return 0; - } - - drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); - - if (!drvdata) { - ret = -ENOMEM; - goto fail; - } - - if (!iommu_dev) { - ret = -ENODEV; - goto fail; - } - - iommu_pclk = clk_get(NULL, "smmu_pclk"); - if (IS_ERR(iommu_pclk)) { - ret = -ENODEV; - goto fail; - } - - ret = clk_enable(iommu_pclk); - if (ret) - goto fail_enable; - - iommu_clk = clk_get(&pdev->dev, "iommu_clk"); - - if (!IS_ERR(iommu_clk)) { - if (clk_get_rate(iommu_clk) == 0) - clk_set_min_rate(iommu_clk, 1); - - ret = clk_enable(iommu_clk); - if (ret) { - 
clk_put(iommu_clk); - goto fail_pclk; - } - } else - iommu_clk = NULL; - - r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); - - if (!r) { - ret = -ENODEV; - goto fail_clk; - } - - len = resource_size(r); - - r2 = request_mem_region(r->start, len, r->name); - if (!r2) { - pr_err("Could not request memory region: start=%p, len=%d\n", - (void *) r->start, len); - ret = -EBUSY; - goto fail_clk; - } - - regs_base = ioremap(r2->start, len); - - if (!regs_base) { - pr_err("Could not ioremap: start=%p, len=%d\n", - (void *) r2->start, len); - ret = -EBUSY; - goto fail_mem; - } - - irq = platform_get_irq_byname(pdev, "secure_irq"); - if (irq < 0) { - ret = -ENODEV; - goto fail_io; - } - - msm_iommu_reset(regs_base, iommu_dev->ncb); - - SET_M(regs_base, 0, 1); - SET_PAR(regs_base, 0, 0); - SET_V2PCFG(regs_base, 0, 1); - SET_V2PPR(regs_base, 0, 0); - par = GET_PAR(regs_base, 0); - SET_V2PCFG(regs_base, 0, 0); - SET_M(regs_base, 0, 0); - - if (!par) { - pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); - ret = -ENODEV; - goto fail_io; - } - - ret = request_irq(irq, msm_iommu_fault_handler, 0, - "msm_iommu_secure_irpt_handler", drvdata); - if (ret) { - pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); - goto fail_io; - } - - - drvdata->pclk = iommu_pclk; - drvdata->clk = iommu_clk; - drvdata->base = regs_base; - drvdata->irq = irq; - drvdata->ncb = iommu_dev->ncb; - - pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", - iommu_dev->name, regs_base, irq, iommu_dev->ncb); - - platform_set_drvdata(pdev, drvdata); - - if (iommu_clk) - clk_disable(iommu_clk); - - clk_disable(iommu_pclk); - - return 0; -fail_io: - iounmap(regs_base); -fail_mem: - release_mem_region(r->start, len); -fail_clk: - if (iommu_clk) { - clk_disable(iommu_clk); - clk_put(iommu_clk); - } -fail_pclk: - clk_disable(iommu_pclk); -fail_enable: - clk_put(iommu_pclk); -fail: - kfree(drvdata); - return ret; -} - -static int msm_iommu_remove(struct platform_device *pdev) -{ - struct msm_iommu_drvdata *drv = NULL; - - drv = platform_get_drvdata(pdev); - if (drv) { - if (drv->clk) - clk_put(drv->clk); - clk_put(drv->pclk); - memset(drv, 0, sizeof(*drv)); - kfree(drv); - platform_set_drvdata(pdev, NULL); - } - return 0; -} - -static int msm_iommu_ctx_probe(struct platform_device *pdev) -{ - struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL; - int i, ret; - if (!c || !pdev->dev.parent) { - ret = -EINVAL; - goto fail; - } - - drvdata = dev_get_drvdata(pdev->dev.parent); - - if (!drvdata) { - ret = -ENODEV; - goto fail; - } - - ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL); - if (!ctx_drvdata) { - ret = -ENOMEM; - goto fail; - } - ctx_drvdata->num = c->num; - ctx_drvdata->pdev = pdev; - - INIT_LIST_HEAD(&ctx_drvdata->attached_elm); - platform_set_drvdata(pdev, ctx_drvdata); - - ret = clk_enable(drvdata->pclk); - if (ret) - goto fail; - - if (drvdata->clk) { - ret = clk_enable(drvdata->clk); - if (ret) { - clk_disable(drvdata->pclk); - goto fail; - } - } - - /* Program the M2V tables for this context */ - for (i = 0; i < MAX_NUM_MIDS; i++) { - int mid = c->mids[i]; - if (mid == -1) - break; - - SET_M2VCBR_N(drvdata->base, mid, 0); - SET_CBACR_N(drvdata->base, c->num, 0); - - /* Set VMID = 0 */ - SET_VMID(drvdata->base, mid, 0); - - /* Set the context number for that MID to this context */ - SET_CBNDX(drvdata->base, mid, c->num); - - /* Set MID associated with this context bank to 0*/ - 
SET_CBVMID(drvdata->base, c->num, 0); - - /* Set the ASID for TLB tagging for this context */ - SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num); - - /* Set security bit override to be Non-secure */ - SET_NSCFG(drvdata->base, mid, 3); - } - - if (drvdata->clk) - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); - - dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); - return 0; -fail: - kfree(ctx_drvdata); - return ret; -} - -static int msm_iommu_ctx_remove(struct platform_device *pdev) -{ - struct msm_iommu_ctx_drvdata *drv = NULL; - drv = platform_get_drvdata(pdev); - if (drv) { - memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata)); - kfree(drv); - platform_set_drvdata(pdev, NULL); - } - return 0; -} - -static struct platform_driver msm_iommu_driver = { - .driver = { - .name = "msm_iommu", - }, - .probe = msm_iommu_probe, - .remove = msm_iommu_remove, -}; - -static struct platform_driver msm_iommu_ctx_driver = { - .driver = { - .name = "msm_iommu_ctx", - }, - .probe = msm_iommu_ctx_probe, - .remove = msm_iommu_ctx_remove, -}; - -static int __init msm_iommu_driver_init(void) -{ - int ret; - ret = platform_driver_register(&msm_iommu_driver); - if (ret != 0) { - pr_err("Failed to register IOMMU driver\n"); - goto error; - } - - ret = platform_driver_register(&msm_iommu_ctx_driver); - if (ret != 0) { - pr_err("Failed to register IOMMU context driver\n"); - goto error; - } - -error: - return ret; -} - -static void __exit msm_iommu_driver_exit(void) -{ - platform_driver_unregister(&msm_iommu_ctx_driver); - platform_driver_unregister(&msm_iommu_driver); -} - -subsys_initcall(msm_iommu_driver_init); -module_exit(msm_iommu_driver_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c index 4efa02ee163..add0d42de7a 100644 --- a/arch/arm/mach-mx5/board-cpuimx51.c +++ b/arch/arm/mach-mx5/board-cpuimx51.c @@ -245,6 +245,8 @@ __setup("otg_mode=", eukrea_cpuimx51_otg_mode); */ static void __init eukrea_cpuimx51_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads, ARRAY_SIZE(eukrea_cpuimx51_pads)); diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c index 5ef25a59614..ff096d58729 100644 --- a/arch/arm/mach-mx5/board-cpuimx51sd.c +++ b/arch/arm/mach-mx5/board-cpuimx51sd.c @@ -264,6 +264,8 @@ static struct platform_device *platform_devices[] __initdata = { static void __init eukrea_cpuimx51sd_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads, ARRAY_SIZE(eukrea_cpuimx51sd_pads)); diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c index 11210e1ae42..7de25c6712e 100644 --- a/arch/arm/mach-mx5/board-mx50_rdp.c +++ b/arch/arm/mach-mx5/board-mx50_rdp.c @@ -192,6 +192,8 @@ static const struct imxi2c_platform_data i2c_data __initconst = { */ static void __init mx50_rdp_board_init(void) { + imx50_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads, ARRAY_SIZE(mx50_rdp_pads)); diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c index 63dfbeafbc1..3112d15feeb 100644 --- a/arch/arm/mach-mx5/board-mx51_3ds.c +++ b/arch/arm/mach-mx5/board-mx51_3ds.c @@ -135,6 +135,8 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = { */ static void __init mx51_3ds_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads, ARRAY_SIZE(mx51_3ds_pads)); diff --git 
a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c index c7b3fabf50f..6021dd00ec7 100644 --- a/arch/arm/mach-mx5/board-mx51_babbage.c +++ b/arch/arm/mach-mx5/board-mx51_babbage.c @@ -340,6 +340,8 @@ static void __init mx51_babbage_init(void) iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 | MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP); + imx51_soc_init(); + #if defined(CONFIG_CPU_FREQ_IMX) get_cpu_op = mx51_get_cpu_op; #endif diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c index 6e362315291..3be603b9075 100644 --- a/arch/arm/mach-mx5/board-mx51_efikamx.c +++ b/arch/arm/mach-mx5/board-mx51_efikamx.c @@ -236,6 +236,8 @@ late_initcall(mx51_efikamx_power_init); static void __init mx51_efikamx_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads, ARRAY_SIZE(mx51efikamx_pads)); efika_board_common_init(); diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c index 474fc6e4c6d..4b2e522de0f 100644 --- a/arch/arm/mach-mx5/board-mx51_efikasb.c +++ b/arch/arm/mach-mx5/board-mx51_efikasb.c @@ -248,6 +248,8 @@ static void __init mx51_efikasb_board_id(void) static void __init efikasb_board_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads, ARRAY_SIZE(mx51efikasb_pads)); efika_board_common_init(); diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c index f87d571882c..0d9218a6e2d 100644 --- a/arch/arm/mach-mx5/board-mx53_evk.c +++ b/arch/arm/mach-mx5/board-mx53_evk.c @@ -117,6 +117,8 @@ static const struct spi_imx_master mx53_evk_spi_data __initconst = { static void __init mx53_evk_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads, ARRAY_SIZE(mx53_evk_pads)); mx53_evk_init_uart(); diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c index 1b947e8c9c0..359c3e248ad 100644 --- a/arch/arm/mach-mx5/board-mx53_loco.c +++ b/arch/arm/mach-mx5/board-mx53_loco.c @@ -227,6 +227,8 @@ static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = { static void __init mx53_loco_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads, ARRAY_SIZE(mx53_loco_pads)); imx53_add_imx_uart(0, NULL); diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c index 817c08938f5..bc02894eafe 100644 --- a/arch/arm/mach-mx5/board-mx53_smd.c +++ b/arch/arm/mach-mx5/board-mx53_smd.c @@ -113,6 +113,8 @@ static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = { static void __init mx53_smd_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads, ARRAY_SIZE(mx53_smd_pads)); mx53_smd_init_uart(); diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c index 6b89c1bf4eb..cd79e3435e2 100644 --- a/arch/arm/mach-mx5/clock-mx51-mx53.c +++ b/arch/arm/mach-mx5/clock-mx51-mx53.c @@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = { _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk) _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) - _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk) + /* i.mx51 has the i.mx35 type cspi */ + _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk) 
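
The "i.mx51 has the i.mx35 type cspi" comment works because clkdev matches lookups against dev_name(): registering the clock under the device name of the compatible IP block lets a single spi_imx driver instance pick up the right clock with a plain NULL-con_id lookup, no SoC-specific glue. A sketch of the consumer side under that assumption (the function name is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int spi_imx_grab_clk(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * dev_name(&pdev->dev) is "imx35-cspi.0" on i.mx51, so the
	 * _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk) lookup
	 * above resolves here.
	 */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	dev_info(&pdev->dev, "clock at %lu Hz\n", clk_get_rate(clk));
	return 0;
}
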
@@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = { _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk) - _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk) - _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk) - _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk) + /* i.mx53 has the i.mx51 type ecspi */ + _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) + _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) + /* i.mx53 has the i.mx25 type cspi */ + _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk) _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk) _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk) }; diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c index 153ada53e57..371ca8c8414 100644 --- a/arch/arm/mach-mx5/devices.c +++ b/arch/arm/mach-mx5/devices.c @@ -12,7 +12,6 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> -#include <linux/gpio.h> #include <mach/hardware.h> #include <mach/imx-uart.h> #include <mach/irqs.h> @@ -119,66 +118,3 @@ struct platform_device mxc_usbh2_device = { .coherent_dma_mask = DMA_BIT_MASK(32), }, }; - -static struct mxc_gpio_port mxc_gpio_ports[] = { - { - .chip.label = "gpio-0", - .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO1_LOW, - .irq_high = MX51_MXC_INT_GPIO1_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START - }, - { - .chip.label = "gpio-1", - .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO2_LOW, - .irq_high = MX51_MXC_INT_GPIO2_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1 - }, - { - .chip.label = "gpio-2", - .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO3_LOW, - .irq_high = MX51_MXC_INT_GPIO3_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2 - }, - { - .chip.label = "gpio-3", - .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO4_LOW, - .irq_high = MX51_MXC_INT_GPIO4_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3 - }, - { - .chip.label = "gpio-4", - .base = MX53_IO_ADDRESS(MX53_GPIO5_BASE_ADDR), - .irq = MX53_INT_GPIO5_LOW, - .irq_high = MX53_INT_GPIO5_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 4 - }, - { - .chip.label = "gpio-5", - .base = MX53_IO_ADDRESS(MX53_GPIO6_BASE_ADDR), - .irq = MX53_INT_GPIO6_LOW, - .irq_high = MX53_INT_GPIO6_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 5 - }, - { - .chip.label = "gpio-6", - .base = MX53_IO_ADDRESS(MX53_GPIO7_BASE_ADDR), - .irq = MX53_INT_GPIO7_LOW, - .irq_high = MX53_INT_GPIO7_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 6 - }, -}; - -int __init imx51_register_gpios(void) -{ - return mxc_gpio_init(mxc_gpio_ports, 4); -} - -int __init imx53_register_gpios(void) -{ - return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports)); -} - diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c index b9c363b514a..77e374c726f 100644 --- a/arch/arm/mach-mx5/mm-mx50.c +++ b/arch/arm/mach-mx5/mm-mx50.c @@ -26,7 +26,6 @@ #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> /* @@ -56,17 +55,18 @@ void __init imx50_init_early(void) mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx50_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 0, 1, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 1, 2, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH), - 
DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 2, 3, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 3, 4, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 4, 5, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 5, 6, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), -}; - void __init mx50_init_irq(void) { tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR)); - mxc_gpio_init(imx50_gpio_ports, ARRAY_SIZE(imx50_gpio_ports)); +} + +void __init imx50_soc_init(void) +{ + /* i.mx50 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH); + mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH); + mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH); } diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c index ff557301b42..665843d6c2b 100644 --- a/arch/arm/mach-mx5/mm.c +++ b/arch/arm/mach-mx5/mm.c @@ -69,8 +69,6 @@ void __init imx53_init_early(void) mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR)); } -int imx51_register_gpios(void); - void __init mx51_init_irq(void) { unsigned long tzic_addr; @@ -86,11 +84,8 @@ void __init mx51_init_irq(void) panic("unable to map TZIC interrupt controller\n"); tzic_init_irq(tzic_virt); - imx51_register_gpios(); } -int imx53_register_gpios(void); - void __init mx53_init_irq(void) { unsigned long tzic_addr; @@ -103,5 +98,25 @@ void __init mx53_init_irq(void) panic("unable to map TZIC interrupt controller\n"); tzic_init_irq(tzic_virt); - imx53_register_gpios(); +} + +void __init imx51_soc_init(void) +{ + /* i.mx51 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO2_LOW, MX51_MXC_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO3_LOW, MX51_MXC_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO4_LOW, MX51_MXC_INT_GPIO4_HIGH); +} + +void __init imx53_soc_init(void) +{ + /* i.mx53 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH); + mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH); + mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH); + mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH); } diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile index 58e892376bf..6c38262a3aa 100644 --- a/arch/arm/mach-mxs/Makefile +++ 
b/arch/arm/mach-mxs/Makefile @@ -1,5 +1,5 @@ # Common support -obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o +obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o obj-$(CONFIG_MXS_OCOTP) += ocotp.o obj-$(CONFIG_PM) += pm.o diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c index cfdb6b28470..fe3e847930c 100644 --- a/arch/arm/mach-mxs/devices.c +++ b/arch/arm/mach-mxs/devices.c @@ -88,3 +88,14 @@ int __init mxs_add_amba_device(const struct amba_device *dev) return amba_device_register(adev, &iomem_resource); } + +struct device mxs_apbh_bus = { + .init_name = "mxs_apbh", + .parent = &platform_bus, +}; + +static int __init mxs_device_init(void) +{ + return device_register(&mxs_apbh_bus); +} +core_initcall(mxs_device_init); diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile index 324f2824d38..351915c683f 100644 --- a/arch/arm/mach-mxs/devices/Makefile +++ b/arch/arm/mach-mxs/devices/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o +obj-y += platform-gpio-mxs.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o diff --git a/arch/arm/mach-mxs/devices/platform-auart.c b/arch/arm/mach-mxs/devices/platform-auart.c index 796606cce0c..27608f5d2ac 100644 --- a/arch/arm/mach-mxs/devices/platform-auart.c +++ b/arch/arm/mach-mxs/devices/platform-auart.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx23.h> #include <mach/mx28.h> diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c index 295c4424d5d..6a0202b1016 100644 --- a/arch/arm/mach-mxs/devices/platform-dma.c +++ b/arch/arm/mach-mxs/devices/platform-dma.c @@ -6,6 +6,7 @@ * Free Software Foundation. */ #include <linux/compiler.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c index 9859cf28333..ae96a4fd8f1 100644 --- a/arch/arm/mach-mxs/devices/platform-fec.c +++ b/arch/arm/mach-mxs/devices/platform-fec.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx28.h> #include <mach/devices-common.h> diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c new file mode 100644 index 00000000000..ed0885e414e --- /dev/null +++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c @@ -0,0 +1,53 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/init.h> + +#include <mach/mx23.h> +#include <mach/mx28.h> +#include <mach/devices-common.h> + +struct platform_device *__init mxs_add_gpio( + int id, resource_size_t iobase, int irq) +{ + struct resource res[] = { + { + .start = iobase, + .end = iobase + SZ_8K - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .end = irq, + .flags = IORESOURCE_IRQ, + }, + }; + + return platform_device_register_resndata(&mxs_apbh_bus, + "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0); +} + +static int __init mxs_add_mxs_gpio(void) +{ + if (cpu_is_mx23()) { + mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0); + mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1); + mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2); + } + + if (cpu_is_mx28()) { + mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0); + mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1); + mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2); + mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3); + mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4); + } + + return 0; +} +postcore_initcall(mxs_add_mxs_gpio); diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c deleted file mode 100644 index 2c950fef71a..00000000000 --- a/arch/arm/mach-mxs/gpio.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * Based on code from Freescale, - * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/gpio.h> -#include <mach/mx23.h> -#include <mach/mx28.h> -#include <asm-generic/bug.h> - -#include "gpio.h" - -static struct mxs_gpio_port *mxs_gpio_ports; -static int gpio_table_size; - -#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10) -#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10) -#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10) -#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10) -#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10) -#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10) -#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10) -#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 
0x0c00 : 0x1400) + (n) * 0x10) - -#define GPIO_INT_FALL_EDGE 0x0 -#define GPIO_INT_LOW_LEV 0x1 -#define GPIO_INT_RISE_EDGE 0x2 -#define GPIO_INT_HIGH_LEV 0x3 -#define GPIO_INT_LEV_MASK (1 << 0) -#define GPIO_INT_POL_MASK (1 << 1) - -/* Note: This driver assumes 32 GPIOs are handled in one register */ - -static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index) -{ - __mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id)); -} - -static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index, - int enable) -{ - if (enable) { - __mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id)); - __mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id)); - } else { - __mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id)); - } -} - -static void mxs_gpio_ack_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f); -} - -static void mxs_gpio_mask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0); -} - -static void mxs_gpio_unmask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1); -} - -static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset); - -static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 pin_mask = 1 << (gpio & 31); - struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32]; - void __iomem *pin_addr; - int edge; - - switch (type) { - case IRQ_TYPE_EDGE_RISING: - edge = GPIO_INT_RISE_EDGE; - break; - case IRQ_TYPE_EDGE_FALLING: - edge = GPIO_INT_FALL_EDGE; - break; - case IRQ_TYPE_LEVEL_LOW: - edge = GPIO_INT_LOW_LEV; - break; - case IRQ_TYPE_LEVEL_HIGH: - edge = GPIO_INT_HIGH_LEV; - break; - default: - return -EINVAL; - } - - /* set level or edge */ - pin_addr = port->base + PINCTRL_IRQLEV(port->id); - if (edge & GPIO_INT_LEV_MASK) - __mxs_setl(pin_mask, pin_addr); - else - __mxs_clrl(pin_mask, pin_addr); - - /* set polarity */ - pin_addr = port->base + PINCTRL_IRQPOL(port->id); - if (edge & GPIO_INT_POL_MASK) - __mxs_setl(pin_mask, pin_addr); - else - __mxs_clrl(pin_mask, pin_addr); - - clear_gpio_irqstatus(port, gpio & 0x1f); - - return 0; -} - -/* MXS has one interrupt *per* gpio port */ -static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - u32 irq_stat; - struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq); - u32 gpio_irq_no_base = port->virtual_irq_start; - - desc->irq_data.chip->irq_ack(&desc->irq_data); - - irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & - __raw_readl(port->base + PINCTRL_IRQEN(port->id)); - - while (irq_stat != 0) { - int irqoffset = fls(irq_stat) - 1; - generic_handle_irq(gpio_irq_no_base + irqoffset); - irq_stat &= ~(1 << irqoffset); - } -} - -/* - * Set interrupt number "irq" in the GPIO as a wake-up source. - * While system is running, all registered GPIO interrupts need to have - * wake-up enabled. When system is suspended, only selected GPIO interrupts - * need to have wake-up enabled. - * @param irq interrupt source number - * @param enable enable as wake-up if equal to non-zero - * @return This function returns 0 on success. 
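The IRQ-type programming in the driver being removed here reduces to two masked register writes: bit 0 of the encoded type selects level versus edge sensing (IRQLEV), bit 1 selects the polarity (IRQPOL). A condensed sketch of that same logic, reusing the macros from the deleted file (the helper name is illustrative only):

static void mxs_program_irq_type(struct mxs_gpio_port *port, u32 pin, int edge)
{
	u32 mask = 1 << pin;

	/* bit 0 of the encoding: 1 = level-triggered, 0 = edge-triggered */
	if (edge & GPIO_INT_LEV_MASK)
		__mxs_setl(mask, port->base + PINCTRL_IRQLEV(port->id));
	else
		__mxs_clrl(mask, port->base + PINCTRL_IRQLEV(port->id));

	/* bit 1 of the encoding: 1 = rising/high, 0 = falling/low */
	if (edge & GPIO_INT_POL_MASK)
		__mxs_setl(mask, port->base + PINCTRL_IRQPOL(port->id));
	else
		__mxs_clrl(mask, port->base + PINCTRL_IRQPOL(port->id));
}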
- */ -static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 gpio_idx = gpio & 0x1f; - struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32]; - - if (enable) { - if (port->irq_high && (gpio_idx >= 16)) - enable_irq_wake(port->irq_high); - else - enable_irq_wake(port->irq); - } else { - if (port->irq_high && (gpio_idx >= 16)) - disable_irq_wake(port->irq_high); - else - disable_irq_wake(port->irq); - } - - return 0; -} - -static struct irq_chip gpio_irq_chip = { - .name = "mxs gpio", - .irq_ack = mxs_gpio_ack_irq, - .irq_mask = mxs_gpio_mask_irq, - .irq_unmask = mxs_gpio_unmask_irq, - .irq_set_type = mxs_gpio_set_irq_type, - .irq_set_wake = mxs_gpio_set_wake_irq, -}; - -static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset, - int dir) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id); - - if (dir) - __mxs_setl(1 << offset, pin_addr); - else - __mxs_clrl(1 << offset, pin_addr); -} - -static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - - return (__raw_readl(port->base + PINCTRL_DIN(port->id)) >> offset) & 1; -} - -static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id); - - if (value) - __mxs_setl(1 << offset, pin_addr); - else - __mxs_clrl(1 << offset, pin_addr); -} - -static int mxs_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - - return port->virtual_irq_start + offset; -} - -static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - mxs_set_gpio_direction(chip, offset, 0); - return 0; -} - -static int mxs_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - mxs_gpio_set(chip, offset, value); - mxs_set_gpio_direction(chip, offset, 1); - return 0; -} - -int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt) -{ - int i, j; - - /* save for local usage */ - mxs_gpio_ports = port; - gpio_table_size = cnt; - - pr_info("MXS GPIO hardware\n"); - - for (i = 0; i < cnt; i++) { - /* disable the interrupt and clear the status */ - __raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i)); - __raw_writel(0, port[i].base + PINCTRL_IRQEN(i)); - - /* clear address has to be used to clear IRQSTAT bits */ - __mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i)); - - for (j = port[i].virtual_irq_start; - j < port[i].virtual_irq_start + 32; j++) { - irq_set_chip_and_handler(j, &gpio_irq_chip, - handle_level_irq); - set_irq_flags(j, IRQF_VALID); - } - - /* setup one handler for each entry */ - irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler); - irq_set_handler_data(port[i].irq, &port[i]); - - /* register gpio chip */ - port[i].chip.direction_input = mxs_gpio_direction_input; - port[i].chip.direction_output = mxs_gpio_direction_output; - port[i].chip.get = mxs_gpio_get; - port[i].chip.set = mxs_gpio_set; - port[i].chip.to_irq = mxs_gpio_to_irq; - port[i].chip.base = i * 32; - port[i].chip.ngpio = 32; - - /* its a serious configuration bug when it fails */ - BUG_ON(gpiochip_add(&port[i].chip) < 0); - } - - return 0; -} - -#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR) -#define MX28_GPIO_BASE 
MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR) - -#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \ - { \ - .chip.label = "gpio-" #_id, \ - .id = _id, \ - .irq = _irq, \ - .base = _base, \ - .virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \ - } - -#ifdef CONFIG_SOC_IMX23 -static struct mxs_gpio_port mx23_gpio_ports[] = { - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0), - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1), - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2), -}; - -int __init mx23_register_gpios(void) -{ - return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports)); -} -#endif - -#ifdef CONFIG_SOC_IMX28 -static struct mxs_gpio_port mx28_gpio_ports[] = { - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4), -}; - -int __init mx28_register_gpios(void) -{ - return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports)); -} -#endif diff --git a/arch/arm/mach-mxs/gpio.h b/arch/arm/mach-mxs/gpio.h deleted file mode 100644 index 005bb06630b..00000000000 --- a/arch/arm/mach-mxs/gpio.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved. - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. 
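With the table-driven registration above removed, the board-independent half of the driver presumably moves behind the "gpio-mxs" platform devices created in platform-gpio-mxs.c. A minimal probe skeleton for such a driver might look as follows; the driver itself is not part of this diff, so everything here is an assumption except the resource layout, which matches mxs_add_gpio() (one MEM region, one IRQ per bank):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int __devinit gpio_mxs_probe(struct platform_device *pdev)
{
	struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	void __iomem *base;

	if (!iores || irq < 0)
		return -ENODEV;

	base = ioremap(iores->start, resource_size(iores));
	if (!base)
		return -EADDRNOTAVAIL;

	/* ... set up the gpio_chip and chained IRQ handling here ... */
	return 0;
}

static struct platform_driver gpio_mxs_driver = {
	.driver	= { .name = "gpio-mxs" },
	.probe	= gpio_mxs_probe,
};
/* registration boilerplate (platform_driver_register) omitted */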
- */ - -#ifndef __MXS_GPIO_H__ -#define __MXS_GPIO_H__ - -struct mxs_gpio_port { - void __iomem *base; - int id; - int irq; - int irq_high; - int virtual_irq_start; - struct gpio_chip chip; -}; - -int mxs_gpio_init(struct mxs_gpio_port*, int); - -#endif /* __MXS_GPIO_H__ */ diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h index 7a37469ed5b..812d7a813a7 100644 --- a/arch/arm/mach-mxs/include/mach/devices-common.h +++ b/arch/arm/mach-mxs/include/mach/devices-common.h @@ -11,6 +11,8 @@ #include <linux/init.h> #include <linux/amba/bus.h> +extern struct device mxs_apbh_bus; + struct platform_device *mxs_add_platform_device_dmamask( const char *name, int id, const struct resource *res, unsigned int num_resources, diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c index eacdc6b0e70..56767a5cce0 100644 --- a/arch/arm/mach-mxs/mach-mx28evk.c +++ b/arch/arm/mach-mxs/mach-mx28evk.c @@ -26,7 +26,6 @@ #include <mach/iomux-mx28.h> #include "devices-mx28.h" -#include "gpio.h" #define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13) #define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15) diff --git a/arch/arm/mach-mxs/mm-mx23.c b/arch/arm/mach-mxs/mm-mx23.c index 5148cd64a6b..1b2345ac1a8 100644 --- a/arch/arm/mach-mxs/mm-mx23.c +++ b/arch/arm/mach-mxs/mm-mx23.c @@ -41,5 +41,4 @@ void __init mx23_map_io(void) void __init mx23_init_irq(void) { icoll_init_irq(); - mx23_register_gpios(); } diff --git a/arch/arm/mach-mxs/mm-mx28.c b/arch/arm/mach-mxs/mm-mx28.c index 7e4cea32ebc..b6e18ddb92c 100644 --- a/arch/arm/mach-mxs/mm-mx28.c +++ b/arch/arm/mach-mxs/mm-mx28.c @@ -41,5 +41,4 @@ void __init mx28_map_io(void) void __init mx28_init_irq(void) { icoll_init_irq(); - mx28_register_gpios(); } diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c index 364137c2042..399da4ce017 100644 --- a/arch/arm/mach-omap1/gpio15xx.c +++ b/arch/arm/mach-omap1/gpio15xx.c @@ -34,11 +34,22 @@ static struct __initdata resource omap15xx_mpu_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap15xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL, + .datain = OMAP_MPUIO_INPUT_LATCH, + .dataout = OMAP_MPUIO_OUTPUT, + .irqstatus = OMAP_MPUIO_GPIO_INT, + .irqenable = OMAP_MPUIO_GPIO_MASKIT, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 16, .bank_stride = 1, + .regs = &omap15xx_mpuio_regs, }; static struct platform_device omap15xx_mpu_gpio = { @@ -64,10 +75,21 @@ static struct __initdata resource omap15xx_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap15xx_gpio_regs = { + .revision = USHRT_MAX, + .direction = OMAP1510_GPIO_DIR_CONTROL, + .datain = OMAP1510_GPIO_DATA_INPUT, + .dataout = OMAP1510_GPIO_DATA_OUTPUT, + .irqstatus = OMAP1510_GPIO_INT_STATUS, + .irqenable = OMAP1510_GPIO_INT_MASK, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_1510, .bank_width = 16, + .regs = &omap15xx_gpio_regs, }; static struct platform_device omap15xx_gpio = { diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c index 293a246e282..0f399bd0e70 100644 --- a/arch/arm/mach-omap1/gpio16xx.c +++ b/arch/arm/mach-omap1/gpio16xx.c @@ -37,11 +37,22 @@ static struct __initdata resource omap16xx_mpu_gpio_resources[] = { }, }; +static struct 
omap_gpio_reg_offs omap16xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL, + .datain = OMAP_MPUIO_INPUT_LATCH, + .dataout = OMAP_MPUIO_OUTPUT, + .irqstatus = OMAP_MPUIO_GPIO_INT, + .irqenable = OMAP_MPUIO_GPIO_MASKIT, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 16, .bank_stride = 1, + .regs = &omap16xx_mpuio_regs, }; static struct platform_device omap16xx_mpu_gpio = { @@ -67,10 +78,24 @@ static struct __initdata resource omap16xx_gpio1_resources[] = { }, }; +static struct omap_gpio_reg_offs omap16xx_gpio_regs = { + .revision = OMAP1610_GPIO_REVISION, + .direction = OMAP1610_GPIO_DIRECTION, + .set_dataout = OMAP1610_GPIO_SET_DATAOUT, + .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT, + .datain = OMAP1610_GPIO_DATAIN, + .dataout = OMAP1610_GPIO_DATAOUT, + .irqstatus = OMAP1610_GPIO_IRQSTATUS1, + .irqenable = OMAP1610_GPIO_IRQENABLE1, + .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1, + .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1, +}; + static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio1 = { @@ -100,6 +125,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = { .virtual_irq_start = IH_GPIO_BASE + 16, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio2 = { @@ -129,6 +155,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = { .virtual_irq_start = IH_GPIO_BASE + 32, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio3 = { @@ -158,6 +185,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = { .virtual_irq_start = IH_GPIO_BASE + 48, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio4 = { diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c index c6ad248d63a..5ab63eab0ff 100644 --- a/arch/arm/mach-omap1/gpio7xx.c +++ b/arch/arm/mach-omap1/gpio7xx.c @@ -39,11 +39,22 @@ static struct __initdata resource omap7xx_mpu_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap7xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL / 2, + .datain = OMAP_MPUIO_INPUT_LATCH / 2, + .dataout = OMAP_MPUIO_OUTPUT / 2, + .irqstatus = OMAP_MPUIO_GPIO_INT / 2, + .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 32, .bank_stride = 2, + .regs = &omap7xx_mpuio_regs, }; static struct platform_device omap7xx_mpu_gpio = { @@ -69,10 +80,21 @@ static struct __initdata resource omap7xx_gpio1_resources[] = { }, }; +static struct omap_gpio_reg_offs omap7xx_gpio_regs = { + .revision = USHRT_MAX, + .direction = OMAP7XX_GPIO_DIR_CONTROL, + .datain = OMAP7XX_GPIO_DATA_INPUT, + .dataout = OMAP7XX_GPIO_DATA_OUTPUT, + .irqstatus = OMAP7XX_GPIO_INT_STATUS, + .irqenable = OMAP7XX_GPIO_INT_MASK, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_7XX, 
.bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio1 = { @@ -102,6 +124,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = { .virtual_irq_start = IH_GPIO_BASE + 32, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio2 = { @@ -131,6 +154,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = { .virtual_irq_start = IH_GPIO_BASE + 64, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio3 = { @@ -160,6 +184,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = { .virtual_irq_start = IH_GPIO_BASE + 96, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio4 = { @@ -189,6 +214,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = { .virtual_irq_start = IH_GPIO_BASE + 128, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio5 = { @@ -218,6 +244,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = { .virtual_irq_start = IH_GPIO_BASE + 160, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; static struct platform_device omap7xx_gpio6 = { diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 334fb8871bc..943072d5a1d 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev) if (ret) return ret; - ret = pm_runtime_clk_suspend(dev); + ret = pm_clk_suspend(dev); if (ret) { pm_generic_runtime_resume(dev); return ret; @@ -45,24 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev) { dev_dbg(dev, "%s\n", __func__); - pm_runtime_clk_resume(dev); + pm_clk_resume(dev); return pm_generic_runtime_resume(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = omap1_pm_runtime_suspend, .runtime_resume = omap1_pm_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }, }; -#define OMAP1_PWR_DOMAIN (&default_power_domain) +#define OMAP1_PM_DOMAIN (&default_pm_domain) #else -#define OMAP1_PWR_DOMAIN NULL +#define OMAP1_PM_DOMAIN NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = OMAP1_PWR_DOMAIN, + .pm_domain = OMAP1_PM_DOMAIN, .con_ids = { "ick", "fck", NULL, }, }; @@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void) if (!cpu_class_is_omap1()) return -ENODEV; - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c index 9529842ae05..2765cdc3152 100644 --- a/arch/arm/mach-omap2/gpio.c +++ b/arch/arm/mach-omap2/gpio.c @@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) pdata->dbck_flag = dev_attr->dbck_flag; pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1); + pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL); + if (!pdata->regs) { + pr_err("gpio%d: Memory allocation failed\n", id); + return -ENOMEM; + } + switch (oh->class->rev) { case 0: case 1: pdata->bank_type = METHOD_GPIO_24XX; + pdata->regs->revision = OMAP24XX_GPIO_REVISION; + pdata->regs->direction =
OMAP24XX_GPIO_OE; + pdata->regs->datain = OMAP24XX_GPIO_DATAIN; + pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT; + pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT; + pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT; + pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1; + pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2; + pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1; + pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1; + pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1; + pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL; + pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN; break; case 2: pdata->bank_type = METHOD_GPIO_44XX; + pdata->regs->revision = OMAP4_GPIO_REVISION; + pdata->regs->direction = OMAP4_GPIO_OE; + pdata->regs->datain = OMAP4_GPIO_DATAIN; + pdata->regs->dataout = OMAP4_GPIO_DATAOUT; + pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT; + pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT; + pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0; + pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1; + pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0; + pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0; + pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0; + pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME; + pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE; break; default: WARN(1, "Invalid gpio bank_type\n"); @@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) return PTR_ERR(od); } + omap_device_disable_idle_on_suspend(od); + gpio_bank_count++; return 0; } diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 1ac361b7b8c..466fc722fa0 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata) WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n", name, oh->name); + omap_device_disable_idle_on_suspend(od); oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); uart->irq = oh->mpu_irqs[0].irq; diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h deleted file mode 100644 index dcef2287cb3..00000000000 --- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h +++ /dev/null @@ -1,28 +0,0 @@ -/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h - * - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * S3C2410 - SPI Controller platform_device info - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
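The omap_gpio_reg_offs tables introduced in the gpio*.c files above let a single driver serve every OMAP1/OMAP2+ bank by indirecting each register access through pdata->regs. A minimal sketch of the consuming side, assuming the driver keeps the ioremapped bank base next to the offsets (the helper below is illustrative, not part of this diff); note irqenable_inv, set for the OMAP1 banks, which flags MASKIT-style registers where a 1 means "masked" rather than "enabled":

#include <linux/io.h>

static u32 omap_bank_irq_enabled(void __iomem *base,
				 const struct omap_gpio_reg_offs *regs)
{
	u32 val = __raw_readl(base + regs->irqenable);

	/* invert MASKIT-style registers so callers always see enables */
	return regs->irqenable_inv ? ~val : val;
}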
-*/ - -#ifndef __ASM_ARCH_SPIGPIO_H -#define __ASM_ARCH_SPIGPIO_H __FILE__ - -struct s3c2410_spigpio_info { - unsigned long pin_clk; - unsigned long pin_mosi; - unsigned long pin_miso; - - int num_chipselect; - int bus_num; - - void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs); -}; - - -#endif /* __ASM_ARCH_SPIGPIO_H */ diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c index e8f49feef28..f44f77531b1 100644 --- a/arch/arm/mach-s3c2410/mach-qt2410.c +++ b/arch/arm/mach-s3c2410/mach-qt2410.c @@ -32,7 +32,7 @@ #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/spi/spi.h> -#include <linux/spi/spi_bitbang.h> +#include <linux/spi/spi_gpio.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> @@ -53,8 +53,6 @@ #include <mach/fb.h> #include <plat/nand.h> #include <plat/udc.h> -#include <mach/spi.h> -#include <mach/spi-gpio.h> #include <plat/iic.h> #include <plat/common-smdk.h> @@ -216,32 +214,16 @@ static struct platform_device qt2410_led = { /* SPI */ -static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs) -{ - switch (cs) { - case BITBANG_CS_ACTIVE: - gpio_set_value(S3C2410_GPB(5), 0); - break; - case BITBANG_CS_INACTIVE: - gpio_set_value(S3C2410_GPB(5), 1); - break; - } -} - -static struct s3c2410_spigpio_info spi_gpio_cfg = { - .pin_clk = S3C2410_GPG(7), - .pin_mosi = S3C2410_GPG(6), - .pin_miso = S3C2410_GPG(5), - .chip_select = &spi_gpio_cs, +static struct spi_gpio_platform_data spi_gpio_cfg = { + .sck = S3C2410_GPG(7), + .mosi = S3C2410_GPG(6), + .miso = S3C2410_GPG(5), }; - static struct platform_device qt2410_spi = { - .name = "s3c24xx-spi-gpio", - .id = 1, - .dev = { - .platform_data = &spi_gpio_cfg, - }, + .name = "spi-gpio", + .id = 1, + .dev.platform_data = &spi_gpio_cfg, }; /* Board devices */ diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c index 85dcaeb9e62..5eeb47580b0 100644 --- a/arch/arm/mach-s3c2412/mach-jive.c +++ b/arch/arm/mach-s3c2412/mach-jive.c @@ -25,6 +25,7 @@ #include <video/ili9320.h> #include <linux/spi/spi.h> +#include <linux/spi/spi_gpio.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -38,7 +39,6 @@ #include <mach/regs-gpio.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> -#include <mach/spi-gpio.h> #include <mach/fb.h> #include <asm/mach-types.h> @@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = { /* LCD SPI support */ -static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs) -{ - gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1); -} - -static struct s3c2410_spigpio_info jive_lcd_spi = { - .bus_num = 1, - .pin_clk = S3C2410_GPG(8), - .pin_mosi = S3C2410_GPB(8), - .num_chipselect = 1, - .chip_select = jive_lcd_spi_chipselect, +static struct spi_gpio_platform_data jive_lcd_spi = { + .sck = S3C2410_GPG(8), + .mosi = S3C2410_GPB(8), + .miso = SPI_GPIO_NO_MISO, }; static struct platform_device jive_device_lcdspi = { - .name = "spi_s3c24xx_gpio", + .name = "spi-gpio", .id = 1, - .num_resources = 0, .dev.platform_data = &jive_lcd_spi, }; -/* WM8750 audio code SPI definition */ -static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs) -{ - gpio_set_value(S3C2410_GPH(10), cs ? 
0 : 1); -} +/* WM8750 audio codec SPI definition */ -static struct s3c2410_spigpio_info jive_wm8750_spi = { - .bus_num = 2, - .pin_clk = S3C2410_GPB(4), - .pin_mosi = S3C2410_GPB(9), - .num_chipselect = 1, - .chip_select = jive_wm8750_chipselect, +static struct spi_gpio_platform_data jive_wm8750_spi = { + .sck = S3C2410_GPB(4), + .mosi = S3C2410_GPB(9), + .miso = SPI_GPIO_NO_MISO, }; static struct platform_device jive_device_wm8750 = { - .name = "spi_s3c24xx_gpio", + .name = "spi-gpio", .id = 2, - .num_resources = 0, .dev.platform_data = &jive_wm8750_spi, }; @@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = { .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */ .max_speed_hz = 100000, .platform_data = &jive_lcm_config, + .controller_data = (void *)S3C2410_GPB(7), }, { .modalias = "WM8750", .bus_num = 2, .chip_select = 0, .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */ .max_speed_hz = 100000, + .controller_data = (void *)S3C2410_GPH(10), }, }; diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 716662008ce..c10ddf4ed7f 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c @@ -74,7 +74,6 @@ #include <mach/fb.h> #include <mach/spi.h> -#include <mach/spi-gpio.h> #include <plat/usb-control.h> #include <mach/regs-mem.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 803bc6edfca..b473b8efac6 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); + hdmi_init_pm_clock(); fsi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init ap4evb_timer_init(void) diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 3802f2afabe..5b36b6c5b44 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1582,8 +1582,13 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); + hdmi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init mackerel_timer_init(void) diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index c0800d83971..91f5779abdd 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = { CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), + CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]), }; void __init sh7372_clock_init(void) diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index df20d767017..ce595cee86c 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -12,6 +12,7 @@ #define __ASM_SH7372_H__ #include
<linux/sh_clk.h> +#include <linux/pm_domain.h> /* * Pin Function Controller: @@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk; extern struct clk sh7372_fsidiva_clk; extern struct clk sh7372_fsidivb_clk; +struct platform_device; + +struct sh7372_pm_domain { + struct generic_pm_domain genpd; + unsigned int bit_shift; +}; + +static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct sh7372_pm_domain, genpd); +} + +#ifdef CONFIG_PM +extern struct sh7372_pm_domain sh7372_a4lc; +extern struct sh7372_pm_domain sh7372_a4mp; +extern struct sh7372_pm_domain sh7372_d4; +extern struct sh7372_pm_domain sh7372_a3rv; +extern struct sh7372_pm_domain sh7372_a3ri; +extern struct sh7372_pm_domain sh7372_a3sg; + +extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); +extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev); +#else +#define sh7372_init_pm_domain(pd) do { } while(0) +#define sh7372_add_device_to_domain(pd, pdev) do { } while(0) +#endif /* CONFIG_PM */ + #endif /* __ASM_SH7372_H__ */ diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 8e4aadf14c9..933fb411be0 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -15,16 +15,176 @@ #include <linux/list.h> #include <linux/err.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h> +#include <linux/delay.h> #include <asm/system.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <mach/common.h> +#include <mach/sh7372.h> #define SMFRAM 0xe6a70000 #define SYSTBCR 0xe6150024 #define SBAR 0xe6180020 #define APARMBAREA 0xe6f10020 +#define SPDCR 0xe6180008 +#define SWUCR 0xe6180014 +#define PSTR 0xe6180080 + +#define PSTR_RETRIES 100 +#define PSTR_DELAY_US 10 + +#ifdef CONFIG_PM + +static int pd_power_down(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + + if (__raw_readl(PSTR) & mask) { + unsigned int retry_count; + + __raw_writel(mask, SPDCR); + + for (retry_count = PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SPDCR) & mask)) + break; + cpu_relax(); + } + } + + pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return 0; +} + +static int pd_power_up(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + unsigned int retry_count; + int ret = 0; + + if (__raw_readl(PSTR) & mask) + goto out; + + __raw_writel(mask, SWUCR); + + for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SWUCR) & mask)) + goto out; + if (retry_count > PSTR_RETRIES) + udelay(PSTR_DELAY_US); + else + cpu_relax(); + } + if (__raw_readl(SWUCR) & mask) + ret = -EIO; + + out: + pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return ret; +} + +static int pd_power_up_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_up(genpd); + + /* force A4LC on after A3RV has been requested on */ + pm_genpd_poweron(&sh7372_a4lc.genpd); + + return ret; +} + +static int pd_power_down_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_down(genpd); + + /* try to power down A4LC after A3RV is requested off */ + genpd_queue_power_off_work(&sh7372_a4lc.genpd); + + return ret; +} + +static int 
pd_power_down_a4lc(struct generic_pm_domain *genpd) +{ + /* only power down A4LC if A3RV is off */ + if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) + return pd_power_down(genpd); + + return -EBUSY; +} + +static bool pd_active_wakeup(struct device *dev) +{ + return true; +} + +void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) +{ + struct generic_pm_domain *genpd = &sh7372_pd->genpd; + + pm_genpd_init(genpd, NULL, false); + genpd->stop_device = pm_clk_suspend; + genpd->start_device = pm_clk_resume; + genpd->active_wakeup = pd_active_wakeup; + + if (sh7372_pd == &sh7372_a4lc) { + genpd->power_off = pd_power_down_a4lc; + genpd->power_on = pd_power_up; + } else if (sh7372_pd == &sh7372_a3rv) { + genpd->power_off = pd_power_down_a3rv; + genpd->power_on = pd_power_up_a3rv; + } else { + genpd->power_off = pd_power_down; + genpd->power_on = pd_power_up; + } + genpd->power_on(&sh7372_pd->genpd); +} + +void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (!dev->power.subsys_data) { + pm_clk_init(dev); + pm_clk_add(dev, NULL); + } + pm_genpd_add_device(&sh7372_pd->genpd, dev); +} + +struct sh7372_pm_domain sh7372_a4lc = { + .bit_shift = 1, +}; + +struct sh7372_pm_domain sh7372_a4mp = { + .bit_shift = 2, +}; + +struct sh7372_pm_domain sh7372_d4 = { + .bit_shift = 3, +}; + +struct sh7372_pm_domain sh7372_a3rv = { + .bit_shift = 6, +}; + +struct sh7372_pm_domain sh7372_a3ri = { + .bit_shift = 8, +}; + +struct sh7372_pm_domain sh7372_a3sg = { + .bit_shift = 13, +}; + +#endif /* CONFIG_PM */ + static void sh7372_enter_core_standby(void) { void __iomem *smfram = (void __iomem *)SMFRAM; diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 2d1b67a59e4..6ec454e1e06 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/sh_clk.h> @@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev) return pm_runtime_suspend(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { - .runtime_suspend = pm_runtime_clk_suspend, - .runtime_resume = pm_runtime_clk_resume, + .runtime_suspend = pm_clk_suspend, + .runtime_resume = pm_clk_resume, .runtime_idle = default_platform_runtime_idle, USE_PLATFORM_PM_SLEEP_OPS }, }; -#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) +#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) #else -#define DEFAULT_PWR_DOMAIN_PTR NULL +#define DEFAULT_PM_DOMAIN_PTR NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, + .pm_domain = DEFAULT_PM_DOMAIN_PTR, .con_ids = { NULL, }, }; static int __init sh_pm_runtime_init(void) { - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(sh_pm_runtime_init); + +static int __init sh_pm_runtime_late_init(void) +{ + pm_genpd_poweroff_unused(); + return 0; +} +late_initcall(sh_pm_runtime_late_init); diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index cd807eea69e..79f0413d872 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ 
b/arch/arm/mach-shmobile/setup-sh7372.c @@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { + sh7372_init_pm_domain(&sh7372_a4lc); + sh7372_init_pm_domain(&sh7372_a4mp); + sh7372_init_pm_domain(&sh7372_d4); + sh7372_init_pm_domain(&sh7372_a3rv); + sh7372_init_pm_domain(&sh7372_a3ri); + sh7372_init_pm_domain(&sh7372_a3sg); + platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); + + sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); } void __init sh7372_add_early_devices(void) diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 823c703e573..ed58ef9019b 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -4,7 +4,6 @@ obj-y += io.o obj-y += irq.o obj-y += clock.o obj-y += timer.o -obj-y += gpio.o obj-y += pinmux.o obj-y += powergate.o obj-y += fuse.o diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c deleted file mode 100644 index 919d6383773..00000000000 --- a/arch/arm/mach-tegra/gpio.c +++ /dev/null @@ -1,431 +0,0 @@ -/* - * arch/arm/mach-tegra/gpio.c - * - * Copyright (c) 2010 Google, Inc - * - * Author: - * Erik Gilling <konkers@google.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
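The sh7372 wiring above follows one pattern: initialize each power domain once, then attach devices so that runtime-PM idling cascades into pd_power_down()/pd_power_up(). In board or SoC setup code the pattern looks roughly like this sketch, where my_pdev is a hypothetical platform device used only for illustration:

extern struct platform_device my_pdev;	/* hypothetical, for illustration */

static void __init example_pm_domain_wiring(void)
{
	sh7372_init_pm_domain(&sh7372_a4mp);	/* once per domain */

	/* wraps the device's clocks for pm_clk and joins the genpd */
	sh7372_add_device_to_domain(&sh7372_a4mp, &my_pdev);

	/* optionally expose an extra clock, as the boards do for "spu2" */
	pm_clk_add(&my_pdev.dev, "spu2");
}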
- * - */ - -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/interrupt.h> - -#include <linux/io.h> -#include <linux/gpio.h> - -#include <asm/mach/irq.h> - -#include <mach/iomap.h> -#include <mach/suspend.h> - -#define GPIO_BANK(x) ((x) >> 5) -#define GPIO_PORT(x) (((x) >> 3) & 0x3) -#define GPIO_BIT(x) ((x) & 0x7) - -#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \ - GPIO_BANK(x) * 0x80 + \ - GPIO_PORT(x) * 4) - -#define GPIO_CNF(x) (GPIO_REG(x) + 0x00) -#define GPIO_OE(x) (GPIO_REG(x) + 0x10) -#define GPIO_OUT(x) (GPIO_REG(x) + 0X20) -#define GPIO_IN(x) (GPIO_REG(x) + 0x30) -#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40) -#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50) -#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60) -#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70) - -#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800) -#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810) -#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0X820) -#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840) -#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850) -#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860) - -#define GPIO_INT_LVL_MASK 0x010101 -#define GPIO_INT_LVL_EDGE_RISING 0x000101 -#define GPIO_INT_LVL_EDGE_FALLING 0x000100 -#define GPIO_INT_LVL_EDGE_BOTH 0x010100 -#define GPIO_INT_LVL_LEVEL_HIGH 0x000001 -#define GPIO_INT_LVL_LEVEL_LOW 0x000000 - -struct tegra_gpio_bank { - int bank; - int irq; - spinlock_t lvl_lock[4]; -#ifdef CONFIG_PM - u32 cnf[4]; - u32 out[4]; - u32 oe[4]; - u32 int_enb[4]; - u32 int_lvl[4]; -#endif -}; - - -static struct tegra_gpio_bank tegra_gpio_banks[] = { - {.bank = 0, .irq = INT_GPIO1}, - {.bank = 1, .irq = INT_GPIO2}, - {.bank = 2, .irq = INT_GPIO3}, - {.bank = 3, .irq = INT_GPIO4}, - {.bank = 4, .irq = INT_GPIO5}, - {.bank = 5, .irq = INT_GPIO6}, - {.bank = 6, .irq = INT_GPIO7}, -}; - -static int tegra_gpio_compose(int bank, int port, int bit) -{ - return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7); -} - -static void tegra_gpio_mask_write(u32 reg, int gpio, int value) -{ - u32 val; - - val = 0x100 << GPIO_BIT(gpio); - if (value) - val |= 1 << GPIO_BIT(gpio); - __raw_writel(val, reg); -} - -void tegra_gpio_enable(int gpio) -{ - tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1); -} - -void tegra_gpio_disable(int gpio) -{ - tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0); -} - -static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value); -} - -static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1; -} - -static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0); - return 0; -} - -static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - tegra_gpio_set(chip, offset, value); - tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1); - return 0; -} - - - -static struct gpio_chip tegra_gpio_chip = { - .label = "tegra-gpio", - .direction_input = tegra_gpio_direction_input, - .get = tegra_gpio_get, - .direction_output = tegra_gpio_direction_output, - .set = tegra_gpio_set, - .base = 0, - .ngpio = TEGRA_NR_GPIOS, -}; - -static void tegra_gpio_irq_ack(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio)); -} - -static void tegra_gpio_irq_mask(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0); 
-} - -static void tegra_gpio_irq_unmask(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1); -} - -static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type) -{ - int gpio = d->irq - INT_GPIO_BASE; - struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); - int port = GPIO_PORT(gpio); - int lvl_type; - int val; - unsigned long flags; - - switch (type & IRQ_TYPE_SENSE_MASK) { - case IRQ_TYPE_EDGE_RISING: - lvl_type = GPIO_INT_LVL_EDGE_RISING; - break; - - case IRQ_TYPE_EDGE_FALLING: - lvl_type = GPIO_INT_LVL_EDGE_FALLING; - break; - - case IRQ_TYPE_EDGE_BOTH: - lvl_type = GPIO_INT_LVL_EDGE_BOTH; - break; - - case IRQ_TYPE_LEVEL_HIGH: - lvl_type = GPIO_INT_LVL_LEVEL_HIGH; - break; - - case IRQ_TYPE_LEVEL_LOW: - lvl_type = GPIO_INT_LVL_LEVEL_LOW; - break; - - default: - return -EINVAL; - } - - spin_lock_irqsave(&bank->lvl_lock[port], flags); - - val = __raw_readl(GPIO_INT_LVL(gpio)); - val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio)); - val |= lvl_type << GPIO_BIT(gpio); - __raw_writel(val, GPIO_INT_LVL(gpio)); - - spin_unlock_irqrestore(&bank->lvl_lock[port], flags); - - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); - else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - __irq_set_handler_locked(d->irq, handle_edge_irq); - - return 0; -} - -static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - struct tegra_gpio_bank *bank; - int port; - int pin; - int unmasked = 0; - struct irq_chip *chip = irq_desc_get_chip(desc); - - chained_irq_enter(chip, desc); - - bank = irq_get_handler_data(irq); - - for (port = 0; port < 4; port++) { - int gpio = tegra_gpio_compose(bank->bank, port, 0); - unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) & - __raw_readl(GPIO_INT_ENB(gpio)); - u32 lvl = __raw_readl(GPIO_INT_LVL(gpio)); - - for_each_set_bit(pin, &sta, 8) { - __raw_writel(1 << pin, GPIO_INT_CLR(gpio)); - - /* if gpio is edge triggered, clear condition - * before executing the hander so that we don't - * miss edges - */ - if (lvl & (0x100 << pin)) { - unmasked = 1; - chained_irq_exit(chip, desc); - } - - generic_handle_irq(gpio_to_irq(gpio + pin)); - } - } - - if (!unmasked) - chained_irq_exit(chip, desc); - -} - -#ifdef CONFIG_PM -void tegra_gpio_resume(void) -{ - unsigned long flags; - int b; - int p; - - local_irq_save(flags); - - for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { - struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; - - for (p = 0; p < ARRAY_SIZE(bank->oe); p++) { - unsigned int gpio = (b<<5) | (p<<3); - __raw_writel(bank->cnf[p], GPIO_CNF(gpio)); - __raw_writel(bank->out[p], GPIO_OUT(gpio)); - __raw_writel(bank->oe[p], GPIO_OE(gpio)); - __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio)); - __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio)); - } - } - - local_irq_restore(flags); -} - -void tegra_gpio_suspend(void) -{ - unsigned long flags; - int b; - int p; - - local_irq_save(flags); - for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { - struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; - - for (p = 0; p < ARRAY_SIZE(bank->oe); p++) { - unsigned int gpio = (b<<5) | (p<<3); - bank->cnf[p] = __raw_readl(GPIO_CNF(gpio)); - bank->out[p] = __raw_readl(GPIO_OUT(gpio)); - bank->oe[p] = __raw_readl(GPIO_OE(gpio)); - bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio)); - bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio)); - } - } - local_irq_restore(flags); -} - -static int 
tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable) -{ - struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); - return irq_set_irq_wake(bank->irq, enable); -} -#endif - -static struct irq_chip tegra_gpio_irq_chip = { - .name = "GPIO", - .irq_ack = tegra_gpio_irq_ack, - .irq_mask = tegra_gpio_irq_mask, - .irq_unmask = tegra_gpio_irq_unmask, - .irq_set_type = tegra_gpio_irq_set_type, -#ifdef CONFIG_PM - .irq_set_wake = tegra_gpio_wake_enable, -#endif -}; - - -/* This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. - */ -static struct lock_class_key gpio_lock_class; - -static int __init tegra_gpio_init(void) -{ - struct tegra_gpio_bank *bank; - int i; - int j; - - for (i = 0; i < 7; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - __raw_writel(0x00, GPIO_INT_ENB(gpio)); - } - } - - gpiochip_add(&tegra_gpio_chip); - - for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { - bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; - - irq_set_lockdep_class(i, &gpio_lock_class); - irq_set_chip_data(i, bank); - irq_set_chip_and_handler(i, &tegra_gpio_irq_chip, - handle_simple_irq); - set_irq_flags(i, IRQF_VALID); - } - - for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { - bank = &tegra_gpio_banks[i]; - - irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); - - for (j = 0; j < 4; j++) - spin_lock_init(&bank->lvl_lock[j]); - } - - return 0; -} - -postcore_initcall(tegra_gpio_init); - -void __init tegra_gpio_config(struct tegra_gpio_table *table, int num) -{ - int i; - - for (i = 0; i < num; i++) { - int gpio = table[i].gpio; - - if (table[i].enable) - tegra_gpio_enable(gpio); - else - tegra_gpio_disable(gpio); - } -} - -#ifdef CONFIG_DEBUG_FS - -#include <linux/debugfs.h> -#include <linux/seq_file.h> - -static int dbg_gpio_show(struct seq_file *s, void *unused) -{ - int i; - int j; - - for (i = 0; i < 7; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - seq_printf(s, - "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", - i, j, - __raw_readl(GPIO_CNF(gpio)), - __raw_readl(GPIO_OE(gpio)), - __raw_readl(GPIO_OUT(gpio)), - __raw_readl(GPIO_IN(gpio)), - __raw_readl(GPIO_INT_STA(gpio)), - __raw_readl(GPIO_INT_ENB(gpio)), - __raw_readl(GPIO_INT_LVL(gpio))); - } - } - return 0; -} - -static int dbg_gpio_open(struct inode *inode, struct file *file) -{ - return single_open(file, dbg_gpio_show, &inode->i_private); -} - -static const struct file_operations debug_fops = { - .open = dbg_gpio_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init tegra_gpio_debuginit(void) -{ - (void) debugfs_create_file("tegra_gpio", S_IRUGO, - NULL, NULL, &debug_fops); - return 0; -} -late_initcall(tegra_gpio_debuginit); -#endif diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3..9ea4f7ddd66 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) fault = __do_page_fault(mm, addr, fsr, tsk); up_read(&mm->mmap_sem); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (fault & VM_FAULT_MAJOR) - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); else if (fault & VM_FAULT_MINOR) - 
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); /* * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile index a1387875a49..d53c35fe2ea 100644 --- a/arch/arm/plat-mxc/Makefile +++ b/arch/arm/plat-mxc/Makefile @@ -3,7 +3,7 @@ # # Common support -obj-y := clock.o gpio.o time.o devices.o cpu.o system.o irq-common.o +obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o # MX51 uses the TZIC interrupt controller, older platforms use AVIC obj-$(CONFIG_MXC_TZIC) += tzic.o diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c index eee1b6096a0..fb166b20f60 100644 --- a/arch/arm/plat-mxc/devices.c +++ b/arch/arm/plat-mxc/devices.c @@ -89,3 +89,14 @@ err: return pdev; } + +struct device mxc_aips_bus = { + .init_name = "mxc_aips", + .parent = &platform_bus, +}; + +static int __init mxc_device_init(void) +{ + return device_register(&mxc_aips_bus); +} +core_initcall(mxc_device_init); diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile index ad2922acf48..b41bf972b54 100644 --- a/arch/arm/plat-mxc/devices/Makefile +++ b/arch/arm/plat-mxc/devices/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o +obj-y += platform-gpio-mxc.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c index ccc789e21da..4fc6ffc2a13 100644 --- a/arch/arm/plat-mxc/devices/platform-fec.c +++ b/arch/arm/plat-mxc/devices/platform-fec.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c index 59c33f6e401..23ce08e6ffd 100644 --- a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c +++ b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-gpio-mxc.c b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c new file mode 100644 index 00000000000..a7919a24103 --- /dev/null +++ b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c @@ -0,0 +1,32 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2011 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ +#include <mach/devices-common.h> + +struct platform_device *__init mxc_register_gpio(char *name, int id, + resource_size_t iobase, resource_size_t iosize, int irq, int irq_high) +{ + struct resource res[] = { + { + .start = iobase, + .end = iobase + iosize - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .end = irq, + .flags = IORESOURCE_IRQ, + }, { + .start = irq_high, + .end = irq_high, + .flags = IORESOURCE_IRQ, + }, + }; + + return platform_device_register_resndata(&mxc_aips_bus, + name, id, res, ARRAY_SIZE(res), NULL, 0); +} diff --git a/arch/arm/plat-mxc/devices/platform-imx-fb.c b/arch/arm/plat-mxc/devices/platform-imx-fb.c index 79a1cb18a5b..2b0b5e0aa99 100644 --- a/arch/arm/plat-mxc/devices/platform-imx-fb.c +++ b/arch/arm/plat-mxc/devices/platform-imx-fb.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-ipu-core.c b/arch/arm/plat-mxc/devices/platform-ipu-core.c index edf65034aea..79d340ae0af 100644 --- a/arch/arm/plat-mxc/devices/platform-ipu-core.c +++ b/arch/arm/plat-mxc/devices/platform-ipu-core.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c index cc488f4b620..e1763e03e7c 100644 --- a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c +++ b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c index 90d762f6f93..540d3a7d92d 100644 --- a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c +++ b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. 
*/ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c index f97eb3615b2..9bfae8bd5b8 100644 --- a/arch/arm/plat-mxc/devices/platform-spi_imx.c +++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c @@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = { #endif #ifdef CONFIG_SOC_IMX25 +/* i.mx25 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { #define imx25_cspi_data_entry(_id, _hwid) \ - imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K) + imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K) imx25_cspi_data_entry(0, 1), imx25_cspi_data_entry(1, 2), imx25_cspi_data_entry(2, 3), @@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX51 +/* i.mx51 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx51_cspi_data __initconst = - imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K); + imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K); const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #define imx51_ecspi_data_entry(_id, _hwid) \ @@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX51 */ #ifdef CONFIG_SOC_IMX53 +/* i.mx53 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx53_cspi_data __initconst = - imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K); + imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K); +/* i.mx53 has the i.mx51 type ecspi */ const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { #define imx53_ecspi_data_entry(_id, _hwid) \ - imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K) + imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K) imx53_ecspi_data_entry(0, 1), imx53_ecspi_data_entry(1, 2), }; diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c deleted file mode 100644 index 6cd6d7f686f..00000000000 --- a/arch/arm/plat-mxc/gpio.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * Based on code from Freescale, - * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/gpio.h> -#include <mach/hardware.h> -#include <asm-generic/bug.h> - -static struct mxc_gpio_port *mxc_gpio_ports; -static int gpio_table_size; - -#define cpu_is_mx1_mx2() (cpu_is_mx1() || cpu_is_mx2()) - -#define GPIO_DR (cpu_is_mx1_mx2() ? 0x1c : 0x00) -#define GPIO_GDIR (cpu_is_mx1_mx2() ? 0x00 : 0x04) -#define GPIO_PSR (cpu_is_mx1_mx2() ? 0x24 : 0x08) -#define GPIO_ICR1 (cpu_is_mx1_mx2() ? 0x28 : 0x0C) -#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10) -#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14) -#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18) - -#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0) -#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1) -#define GPIO_INT_RISE_EDGE (cpu_is_mx1_mx2() ? 0x0 : 0x2) -#define GPIO_INT_FALL_EDGE (cpu_is_mx1_mx2() ? 0x1 : 0x3) -#define GPIO_INT_NONE 0x4 - -/* Note: This driver assumes 32 GPIOs are handled in one register */ - -static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index) -{ - __raw_writel(1 << index, port->base + GPIO_ISR); -} - -static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index, - int enable) -{ - u32 l; - - l = __raw_readl(port->base + GPIO_IMR); - l = (l & (~(1 << index))) | (!!enable << index); - __raw_writel(l, port->base + GPIO_IMR); -} - -static void gpio_ack_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f); -} - -static void gpio_mask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0); -} - -static void gpio_unmask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1); -} - -static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset); - -static int gpio_set_irq_type(struct irq_data *d, u32 type) -{ - u32 gpio = irq_to_gpio(d->irq); - struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32]; - u32 bit, val; - int edge; - void __iomem *reg = port->base; - - port->both_edges &= ~(1 << (gpio & 31)); - switch (type) { - case IRQ_TYPE_EDGE_RISING: - edge = GPIO_INT_RISE_EDGE; - break; - case IRQ_TYPE_EDGE_FALLING: - edge = GPIO_INT_FALL_EDGE; - break; - case IRQ_TYPE_EDGE_BOTH: - val = mxc_gpio_get(&port->chip, gpio & 31); - if (val) { - edge = GPIO_INT_LOW_LEV; - pr_debug("mxc: set GPIO %d to low trigger\n", gpio); - } else { - edge = GPIO_INT_HIGH_LEV; - pr_debug("mxc: set GPIO %d to high trigger\n", gpio); - } - port->both_edges |= 1 << (gpio & 31); - break; - case IRQ_TYPE_LEVEL_LOW: - edge = GPIO_INT_LOW_LEV; - break; - case IRQ_TYPE_LEVEL_HIGH: - edge = GPIO_INT_HIGH_LEV; - break; - default: - return -EINVAL; - } - - reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ - bit = gpio & 0xf; - val = __raw_readl(reg) & ~(0x3 << (bit << 1)); - __raw_writel(val | (edge << (bit << 1)), reg); - _clear_gpio_irqstatus(port, gpio & 0x1f); - - return 0; -} - -static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) -{ - void __iomem *reg = port->base; - u32 bit, val; - int edge; - - reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ - bit = gpio & 0xf; - val = __raw_readl(reg); - edge = (val >> (bit << 1)) & 3; - val &= ~(0x3 << (bit << 1)); - if (edge == GPIO_INT_HIGH_LEV) { - edge = GPIO_INT_LOW_LEV; - pr_debug("mxc: switch GPIO %d to low trigger\n", gpio); - } else if (edge == 
GPIO_INT_LOW_LEV) { - edge = GPIO_INT_HIGH_LEV; - pr_debug("mxc: switch GPIO %d to high trigger\n", gpio); - } else { - pr_err("mxc: invalid configuration for GPIO %d: %x\n", - gpio, edge); - return; - } - __raw_writel(val | (edge << (bit << 1)), reg); -} - -/* handle 32 interrupts in one status register */ -static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) -{ - u32 gpio_irq_no_base = port->virtual_irq_start; - - while (irq_stat != 0) { - int irqoffset = fls(irq_stat) - 1; - - if (port->both_edges & (1 << irqoffset)) - mxc_flip_edge(port, irqoffset); - - generic_handle_irq(gpio_irq_no_base + irqoffset); - - irq_stat &= ~(1 << irqoffset); - } -} - -/* MX1 and MX3 has one interrupt *per* gpio port */ -static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - u32 irq_stat; - struct mxc_gpio_port *port = irq_get_handler_data(irq); - - irq_stat = __raw_readl(port->base + GPIO_ISR) & - __raw_readl(port->base + GPIO_IMR); - - mxc_gpio_irq_handler(port, irq_stat); -} - -/* MX2 has one interrupt *for all* gpio ports */ -static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - int i; - u32 irq_msk, irq_stat; - struct mxc_gpio_port *port = irq_get_handler_data(irq); - - /* walk through all interrupt status registers */ - for (i = 0; i < gpio_table_size; i++) { - irq_msk = __raw_readl(port[i].base + GPIO_IMR); - if (!irq_msk) - continue; - - irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk; - if (irq_stat) - mxc_gpio_irq_handler(&port[i], irq_stat); - } -} - -/* - * Set interrupt number "irq" in the GPIO as a wake-up source. - * While system is running, all registered GPIO interrupts need to have - * wake-up enabled. When system is suspended, only selected GPIO interrupts - * need to have wake-up enabled. - * @param irq interrupt source number - * @param enable enable as wake-up if equal to non-zero - * @return This function returns 0 on success. 
- */ -static int gpio_set_wake_irq(struct irq_data *d, u32 enable) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 gpio_idx = gpio & 0x1F; - struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32]; - - if (enable) { - if (port->irq_high && (gpio_idx >= 16)) - enable_irq_wake(port->irq_high); - else - enable_irq_wake(port->irq); - } else { - if (port->irq_high && (gpio_idx >= 16)) - disable_irq_wake(port->irq_high); - else - disable_irq_wake(port->irq); - } - - return 0; -} - -static struct irq_chip gpio_irq_chip = { - .name = "GPIO", - .irq_ack = gpio_ack_irq, - .irq_mask = gpio_mask_irq, - .irq_unmask = gpio_unmask_irq, - .irq_set_type = gpio_set_irq_type, - .irq_set_wake = gpio_set_wake_irq, -}; - -static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset, - int dir) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - u32 l; - unsigned long flags; - - spin_lock_irqsave(&port->lock, flags); - l = __raw_readl(port->base + GPIO_GDIR); - if (dir) - l |= 1 << offset; - else - l &= ~(1 << offset); - __raw_writel(l, port->base + GPIO_GDIR); - spin_unlock_irqrestore(&port->lock, flags); -} - -static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - void __iomem *reg = port->base + GPIO_DR; - u32 l; - unsigned long flags; - - spin_lock_irqsave(&port->lock, flags); - l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset); - __raw_writel(l, reg); - spin_unlock_irqrestore(&port->lock, flags); -} - -static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - - return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1; -} - -static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - _set_gpio_direction(chip, offset, 0); - return 0; -} - -static int mxc_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - mxc_gpio_set(chip, offset, value); - _set_gpio_direction(chip, offset, 1); - return 0; -} - -/* - * This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. 
- */ -static struct lock_class_key gpio_lock_class; - -int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) -{ - int i, j; - - /* save for local usage */ - mxc_gpio_ports = port; - gpio_table_size = cnt; - - printk(KERN_INFO "MXC GPIO hardware\n"); - - for (i = 0; i < cnt; i++) { - /* disable the interrupt and clear the status */ - __raw_writel(0, port[i].base + GPIO_IMR); - __raw_writel(~0, port[i].base + GPIO_ISR); - for (j = port[i].virtual_irq_start; - j < port[i].virtual_irq_start + 32; j++) { - irq_set_lockdep_class(j, &gpio_lock_class); - irq_set_chip_and_handler(j, &gpio_irq_chip, - handle_level_irq); - set_irq_flags(j, IRQF_VALID); - } - - /* register gpio chip */ - port[i].chip.direction_input = mxc_gpio_direction_input; - port[i].chip.direction_output = mxc_gpio_direction_output; - port[i].chip.get = mxc_gpio_get; - port[i].chip.set = mxc_gpio_set; - port[i].chip.base = i * 32; - port[i].chip.ngpio = 32; - - spin_lock_init(&port[i].lock); - - /* its a serious configuration bug when it fails */ - BUG_ON( gpiochip_add(&port[i].chip) < 0 ); - - if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) { - /* setup one handler for each entry */ - irq_set_chained_handler(port[i].irq, - mx3_gpio_irq_handler); - irq_set_handler_data(port[i].irq, &port[i]); - if (port[i].irq_high) { - /* setup handler for GPIO 16 to 31 */ - irq_set_chained_handler(port[i].irq_high, - mx3_gpio_irq_handler); - irq_set_handler_data(port[i].irq_high, - &port[i]); - } - } - } - - if (cpu_is_mx2()) { - /* setup one handler for all GPIO interrupts */ - irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler); - irq_set_handler_data(port[0].irq, port); - } - - return 0; -} diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h index da7991832af..4e3d97890d6 100644 --- a/arch/arm/plat-mxc/include/mach/common.h +++ b/arch/arm/plat-mxc/include/mach/common.h @@ -43,6 +43,15 @@ extern void mx35_init_irq(void); extern void mx50_init_irq(void); extern void mx51_init_irq(void); extern void mx53_init_irq(void); +extern void imx1_soc_init(void); +extern void imx21_soc_init(void); +extern void imx25_soc_init(void); +extern void imx27_soc_init(void); +extern void imx31_soc_init(void); +extern void imx35_soc_init(void); +extern void imx50_soc_init(void); +extern void imx51_soc_init(void); +extern void imx53_soc_init(void); extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); extern int mx1_clocks_init(unsigned long fref); @@ -55,7 +64,8 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc, unsigned long ckih1, unsigned long ckih2); extern int mx53_clocks_init(unsigned long ckil, unsigned long osc, unsigned long ckih1, unsigned long ckih2); -extern int mxc_register_gpios(void); +extern struct platform_device *mxc_register_gpio(char *name, int id, + resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); extern int mxc_register_device(struct platform_device *pdev, void *data); extern void mxc_set_cpu_type(unsigned int type); extern void mxc_arch_reset_init(void __iomem *); diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h index fa8477337f9..03f62664537 100644 --- a/arch/arm/plat-mxc/include/mach/devices-common.h +++ b/arch/arm/plat-mxc/include/mach/devices-common.h @@ -10,6 +10,8 @@ #include <linux/platform_device.h> #include <linux/init.h> +extern struct device mxc_aips_bus; 
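The mxc_aips_bus device exported here is the parent under which the new mxc_register_gpio() helper (declared in common.h above) registers each GPIO bank. A minimal sketch of how one of the imx*_soc_init() hooks declared above might use it; the "imx31-gpio" device name and the MX51_* base/IRQ macros are assumptions for illustration, not taken from this patch:

#include <mach/devices-common.h>	/* mxc_register_gpio(), mxc_aips_bus */
#include <mach/hardware.h>		/* SoC base addresses (assumed) */

/* Sketch: register one GPIO bank so its platform device is parented to
 * mxc_aips_bus; the names and addresses below are illustrative only. */
void __init imx51_soc_init(void)
{
	mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K,
			  MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH);
}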
+ struct platform_device *imx_add_platform_device_dmamask( const char *name, int id, const struct resource *res, unsigned int num_resources, diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h index a2747f12813..31c820c1b79 100644 --- a/arch/arm/plat-mxc/include/mach/gpio.h +++ b/arch/arm/plat-mxc/include/mach/gpio.h @@ -36,31 +36,4 @@ #define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio)) #define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) -struct mxc_gpio_port { - void __iomem *base; - int irq; - int irq_high; - int virtual_irq_start; - struct gpio_chip chip; - u32 both_edges; - spinlock_t lock; -}; - -#define DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, _irq_high) \ - { \ - .chip.label = "gpio-" #_id, \ - .irq = _irq, \ - .irq_high = _irq_high, \ - .base = soc ## _IO_ADDRESS( \ - soc ## _GPIO ## _hwid ## _BASE_ADDR), \ - .virtual_irq_start = MXC_GPIO_IRQ_START + (_id) * 32, \ - } - -#define DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, _irq) \ - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, 0) -#define DEFINE_IMX_GPIO_PORT(soc, _id, _hwid) \ - DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, 0) - -int mxc_gpio_init(struct mxc_gpio_port*, int); - #endif diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h index 35c89bcdf75..00e812bbd81 100644 --- a/arch/arm/plat-mxc/include/mach/irqs.h +++ b/arch/arm/plat-mxc/include/mach/irqs.h @@ -11,6 +11,8 @@ #ifndef __ASM_ARCH_MXC_IRQS_H__ #define __ASM_ARCH_MXC_IRQS_H__ +#include <asm-generic/gpio.h> + /* * SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64 */ @@ -22,30 +24,13 @@ #define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS -/* these are ordered by size to support multi-SoC kernels */ -#if defined CONFIG_SOC_IMX53 -#define MXC_GPIO_IRQS (32 * 7) -#elif defined CONFIG_ARCH_MX2 -#define MXC_GPIO_IRQS (32 * 6) -#elif defined CONFIG_SOC_IMX50 -#define MXC_GPIO_IRQS (32 * 6) -#elif defined CONFIG_ARCH_MX1 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_ARCH_MX25 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_SOC_IMX51 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_ARCH_MX3 -#define MXC_GPIO_IRQS (32 * 3) -#endif - /* * The next 16 interrupts are for board specific purposes. Since * the kernel can only run on one machine at a time, we can re-use * these. If you need more, increase MXC_BOARD_IRQS, but keep it * within sensible limits. 
*/ -#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS) +#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS) #ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 #define MXC_BOARD_IRQS 80 diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index c44886062f8..685c78716d9 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -10,6 +10,7 @@ #define STE_DMA40_H #include <linux/dmaengine.h> +#include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/interrupt.h> diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h index ec97e00cb58..91e8de3db08 100644 --- a/arch/arm/plat-omap/include/plat/gpio.h +++ b/arch/arm/plat-omap/include/plat/gpio.h @@ -174,12 +174,32 @@ struct omap_gpio_dev_attr { bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ }; +struct omap_gpio_reg_offs { + u16 revision; + u16 direction; + u16 datain; + u16 dataout; + u16 set_dataout; + u16 clr_dataout; + u16 irqstatus; + u16 irqstatus2; + u16 irqenable; + u16 set_irqenable; + u16 clr_irqenable; + u16 debounce; + u16 debounce_en; + + bool irqenable_inv; +}; + struct omap_gpio_platform_data { u16 virtual_irq_start; int bank_type; int bank_width; /* GPIO bank width */ int bank_stride; /* Only needed for omap1 MPUIO */ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ + + struct omap_gpio_reg_offs *regs; }; /* TODO: Analyze removing gpio_bank_count usage from driver code */ diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h index e4c349ff9fd..ee405b36df4 100644 --- a/arch/arm/plat-omap/include/plat/omap_device.h +++ b/arch/arm/plat-omap/include/plat/omap_device.h @@ -44,6 +44,10 @@ extern struct device omap_device_parent; #define OMAP_DEVICE_STATE_IDLE 2 #define OMAP_DEVICE_STATE_SHUTDOWN 3 +/* omap_device.flags values */ +#define OMAP_DEVICE_SUSPENDED BIT(0) +#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1) + /** * struct omap_device - omap_device wrapper for platform_devices * @pdev: platform_device @@ -73,6 +77,7 @@ struct omap_device { s8 pm_lat_level; u8 hwmods_cnt; u8 _state; + u8 flags; }; /* Device driver interface (call via platform_data fn ptrs) */ @@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od); int omap_device_disable_clocks(struct omap_device *od); int omap_device_enable_clocks(struct omap_device *od); +static inline void omap_device_disable_idle_on_suspend(struct omap_device *od) +{ + od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND; +} /* * Entries should be kept in latency order ascending diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 49fc0df0c21..2526fa312b8 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od) return 0; } +#ifdef CONFIG_PM_RUNTIME static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -563,13 +564,55 @@ static int _od_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } +#endif -static struct dev_power_domain omap_device_power_domain = { +#ifdef CONFIG_SUSPEND +static int _od_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + int ret; + + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return 
pm_generic_suspend_noirq(dev); + + ret = pm_generic_suspend_noirq(dev); + + if (!ret && !pm_runtime_status_suspended(dev)) { + if (pm_generic_runtime_suspend(dev) == 0) { + omap_device_idle(pdev); + od->flags |= OMAP_DEVICE_SUSPENDED; + } + } + + return ret; +} + +static int _od_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return pm_generic_resume_noirq(dev); + + if ((od->flags & OMAP_DEVICE_SUSPENDED) && + !pm_runtime_status_suspended(dev)) { + od->flags &= ~OMAP_DEVICE_SUSPENDED; + omap_device_enable(pdev); + pm_generic_runtime_resume(dev); + } + + return pm_generic_resume_noirq(dev); +} +#endif + +static struct dev_pm_domain omap_device_pm_domain = { .ops = { - .runtime_suspend = _od_runtime_suspend, - .runtime_idle = _od_runtime_idle, - .runtime_resume = _od_runtime_resume, + SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, + _od_runtime_idle) USE_PLATFORM_PM_SLEEP_OPS + SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) } }; @@ -586,7 +629,7 @@ int omap_device_register(struct omap_device *od) pr_debug("omap_device: %s: registering\n", od->pdev.name); od->pdev.dev.parent = &omap_device_parent; - od->pdev.dev.pwr_domain = &omap_device_power_domain; + od->pdev.dev.pm_domain = &omap_device_pm_domain; return platform_device_register(&od->pdev); } diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h deleted file mode 100644 index 1ab332e37d7..00000000000 --- a/arch/arm/plat-pxa/include/plat/sdhci.h +++ /dev/null @@ -1,35 +0,0 @@ -/* linux/arch/arm/plat-pxa/include/plat/sdhci.h - * - * Copyright 2010 Marvell - * Zhangfei Gao <zhangfei.gao@marvell.com> - * - * PXA Platform - SDHCI platform data definitions - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __PLAT_PXA_SDHCI_H -#define __PLAT_PXA_SDHCI_H - -/* pxa specific flag */ -/* Require clock free running */ -#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0) - -/* Board design supports 8-bit data on SD/SDIO BUS */ -#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) - -/* - * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI - * @max_speed: the maximum speed supported - * @quirks: quirks of specific device - * @flags: flags for platform requirement - */ -struct sdhci_pxa_platdata { - unsigned int max_speed; - unsigned int quirks; - unsigned int flags; -}; - -#endif /* __PLAT_PXA_SDHCI_H */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 38280ef4a2a..7336ba653b8 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -627,27 +627,6 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pcmcia/Kconfig" -config DMAR - bool "Support for DMA Remapping Devices (EXPERIMENTAL)" - depends on IA64_GENERIC && ACPI && EXPERIMENTAL - help - DMA remapping (DMAR) devices support enables independent address - translations for Direct Memory Access (DMA) from devices. - These DMA remapping devices are reported via ACPI tables - and include PCI device scope covered by these DMA - remapping devices. - -config DMAR_DEFAULT_ON - def_bool y - prompt "Enable DMA Remapping Devices by default" - depends on DMAR - help - Selecting this option will enable a DMAR device at boot time if - one is found. 
If this option is not selected, DMAR support can - be enabled by passing intel_iommu=on to the kernel. It is - recommended you say N here while the DMAR code remains - experimental. - endmenu endif @@ -681,6 +660,3 @@ source "lib/Kconfig" config IOMMU_HELPER def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) - -config IOMMU_API - def_bool (DMAR) diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 81a1f4e6bcd..485c42d97e8 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -112,8 +112,6 @@ static void sn_ack_irq(struct irq_data *data) irq_move_irq(data); } -static void sn_irq_info_free(struct rcu_head *head); - struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, nasid_t nasid, int slice) { @@ -177,7 +175,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, spin_lock(&sn_irq_info_lock); list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); spin_unlock(&sn_irq_info_lock); - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + kfree_rcu(sn_irq_info, rcu); finish_up: @@ -338,14 +336,6 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) rcu_read_unlock(); } -static void sn_irq_info_free(struct rcu_head *head) -{ - struct sn_irq_info *sn_irq_info; - - sn_irq_info = container_of(head, struct sn_irq_info, rcu); - kfree(sn_irq_info); -} - void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) { nasid_t nasid = sn_irq_info->irq_nasid; @@ -399,7 +389,7 @@ void sn_irq_unfixup(struct pci_dev *pci_dev) spin_unlock(&sn_irq_info_lock); if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) free_irq_vector(sn_irq_info->irq_irq); - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + kfree_rcu(sn_irq_info, rcu); pci_dev_put(pci_dev); } diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index 8b6e201b2c2..c5748bb4ea7 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/interrupt.h> #include <linux/module.h> #include <asm/natfeat.h> #include <asm/virtconvert.h> @@ -204,7 +205,6 @@ static struct net_device * __init nfeth_probe(int unit) dev->irq = nfEtherIRQ; dev->netdev_ops = &nfeth_netdev_ops; - dev->flags |= NETIF_F_NO_CSUM; memcpy(dev->dev_addr, mac, ETH_ALEN); priv = netdev_priv(dev); diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h index 746df91e579..242be57a319 100644 --- a/arch/microblaze/include/asm/pci-bridge.h +++ b/arch/microblaze/include/asm/pci-bridge.h @@ -19,9 +19,6 @@ enum { */ PCI_REASSIGN_ALL_RSRC = 0x00000001, - /* Re-assign all bus numbers */ - PCI_REASSIGN_ALL_BUS = 0x00000002, - /* Do not try to assign, just use existing setup */ PCI_PROBE_ONLY = 0x00000004, @@ -110,16 +107,6 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) return bus->sysdata; } -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - struct pci_controller *host; - - if (bus->self) - return pci_device_to_OF_node(bus->self); - host = pci_bus_to_host(bus); - return host ? 
host->dn : NULL; -} - static inline int isa_vaddr_is_ioport(void __iomem *address) { /* No specific ISA handling on ppc32 at this stage, it diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index ba65cf47254..1dd9d6b1e27 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -40,8 +40,7 @@ struct pci_dev; * Set this to 1 if you want the kernel to re-assign all PCI * bus numbers (don't do that on ppc64 yet !) */ -#define pcibios_assign_all_busses() \ - (pci_has_flag(PCI_REASSIGN_ALL_BUS)) +#define pcibios_assign_all_busses() 0 static inline void pcibios_set_master(struct pci_dev *dev) { diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index d0890d36ef6..9bd01ecb00d 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -29,21 +29,6 @@ extern int early_uartlite_console(void); extern int early_uart16550_console(void); -#ifdef CONFIG_PCI -/* - * PCI <-> OF matching functions - * (XXX should these be here?) - */ -struct pci_bus; -struct pci_dev; -extern int pci_device_from_OF_node(struct device_node *node, - u8 *bus, u8 *devfn); -extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus, - int devfn); -extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev); -extern void pci_create_OF_bus_map(void); -#endif - /* * OF address retreival & translation */ diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile index 9889cc2e129..d1114fbd478 100644 --- a/arch/microblaze/pci/Makefile +++ b/arch/microblaze/pci/Makefile @@ -2,5 +2,5 @@ # Makefile # -obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o +obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 53599067d2f..041b1d86d75 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -50,6 +50,11 @@ unsigned int pci_flags; static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +unsigned long isa_io_base; +unsigned long pci_dram_offset; +static int pci_bus_count; + + void set_pci_dma_ops(struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; @@ -1558,6 +1563,112 @@ void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) (unsigned long)hose->io_base_virt - _IO_BASE); } +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return of_node_get(hose->dn); +} + +static void __devinit pcibios_scan_phb(struct pci_controller *hose) +{ + struct pci_bus *bus; + struct device_node *node = hose->dn; + unsigned long io_offset; + struct resource *res = &hose->io_resource; + + pr_debug("PCI: Scanning PHB %s\n", + node ? 
node->full_name : "<NO NAME>"); + + /* Create an empty bus for the toplevel */ + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); + if (bus == NULL) { + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", + hose->global_number); + return; + } + bus->secondary = hose->first_busno; + hose->bus = bus; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + /* Wire up PHB bus resources */ + pcibios_setup_phb_resources(hose); + + /* Scan children */ + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); +} + +static int __init pcibios_init(void) +{ + struct pci_controller *hose, *tmp; + int next_busno = 0; + + printk(KERN_INFO "PCI: Probing PCI hardware\n"); + + /* Scan all of the recorded PCI controllers. */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + hose->last_busno = 0xff; + pcibios_scan_phb(hose); + printk(KERN_INFO "calling pci_bus_add_devices()\n"); + pci_bus_add_devices(hose->bus); + if (next_busno <= hose->last_busno) + next_busno = hose->last_busno + 1; + } + pci_bus_count = next_busno; + + /* Call common code to handle resource allocation */ + pcibios_resource_survey(); + + return 0; +} + +subsys_initcall(pcibios_init); + +static struct pci_controller *pci_bus_to_hose(int bus) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (bus >= hose->first_busno && bus <= hose->last_busno) + return hose; + return NULL; +} + +/* Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right + * root bridge. + * Note that the returned IO or memory base is a physical address + */ + +long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) +{ + struct pci_controller *hose; + long result = -EOPNOTSUPP; + + hose = pci_bus_to_hose(bus); + if (!hose) + return -ENODEV; + + switch (which) { + case IOBASE_BRIDGE_NUMBER: + return (long)hose->first_busno; + case IOBASE_MEMORY: + return (long)hose->pci_mem_offset; + case IOBASE_IO: + return (long)hose->io_base_phys; + case IOBASE_ISA_IO: + return (long)isa_io_base; + case IOBASE_ISA_MEM: + return (long)isa_mem_base; + } + + return result; +} + /* * Null PCI config access functions, for the case when we can't * find a hose. @@ -1626,3 +1737,4 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, { return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } + diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c deleted file mode 100644 index 92728a6cfd8..00000000000 --- a/arch/microblaze/pci/pci_32.c +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Common pmac/prep/chrp pci routines. 
-- Cort - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/capability.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/bootmem.h> -#include <linux/irq.h> -#include <linux/list.h> -#include <linux/of.h> -#include <linux/slab.h> - -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/sections.h> -#include <asm/pci-bridge.h> -#include <asm/byteorder.h> -#include <asm/uaccess.h> - -#undef DEBUG - -unsigned long isa_io_base; -unsigned long pci_dram_offset; -int pcibios_assign_bus_offset = 1; - -static u8 *pci_to_OF_bus_map; - -/* By default, we don't re-assign bus numbers. We do this only on - * some pmacs - */ -static int pci_assign_all_buses; - -static int pci_bus_count; - -/* - * Functions below are used on OpenFirmware machines. - */ -static void -make_one_node_map(struct device_node *node, u8 pci_bus) -{ - const int *bus_range; - int len; - - if (pci_bus >= pci_bus_count) - return; - bus_range = of_get_property(node, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, " - "assuming it starts at 0\n", node->full_name); - pci_to_OF_bus_map[pci_bus] = 0; - } else - pci_to_OF_bus_map[pci_bus] = bus_range[0]; - - for_each_child_of_node(node, node) { - struct pci_dev *dev; - const unsigned int *class_code, *reg; - - class_code = of_get_property(node, "class-code", NULL); - if (!class_code || - ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - reg = of_get_property(node, "reg", NULL); - if (!reg) - continue; - dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); - if (!dev || !dev->subordinate) { - pci_dev_put(dev); - continue; - } - make_one_node_map(node, dev->subordinate->number); - pci_dev_put(dev); - } -} - -void -pcibios_make_OF_bus_map(void) -{ - int i; - struct pci_controller *hose, *tmp; - struct property *map_prop; - struct device_node *dn; - - pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); - if (!pci_to_OF_bus_map) { - printk(KERN_ERR "Can't allocate OF bus map !\n"); - return; - } - - /* We fill the bus map with invalid values, that helps - * debugging. 
- */ - for (i = 0; i < pci_bus_count; i++) - pci_to_OF_bus_map[i] = 0xff; - - /* For each hose, we begin searching bridges */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - struct device_node *node = hose->dn; - - if (!node) - continue; - make_one_node_map(node, hose->first_busno); - } - dn = of_find_node_by_path("/"); - map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); - if (map_prop) { - BUG_ON(pci_bus_count > map_prop->length); - memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); - } - of_node_put(dn); -#ifdef DEBUG - printk(KERN_INFO "PCI->OF bus map:\n"); - for (i = 0; i < pci_bus_count; i++) { - if (pci_to_OF_bus_map[i] == 0xff) - continue; - printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]); - } -#endif -} - -typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); - -static struct device_node *scan_OF_pci_childs(struct device_node *parent, - pci_OF_scan_iterator filter, void *data) -{ - struct device_node *node; - struct device_node *sub_node; - - for_each_child_of_node(parent, node) { - const unsigned int *class_code; - - if (filter(node, data)) { - of_node_put(node); - return node; - } - - /* For PCI<->PCI bridges or CardBus bridges, we go down - * Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. - */ - class_code = of_get_property(node, "class-code", NULL); - if ((!class_code || - ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && - strcmp(node->name, "multifunc-device")) - continue; - sub_node = scan_OF_pci_childs(node, filter, data); - if (sub_node) { - of_node_put(node); - return sub_node; - } - } - return NULL; -} - -static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, - unsigned int devfn) -{ - struct device_node *np, *cnp; - const u32 *reg; - unsigned int psize; - - for_each_child_of_node(parent, np) { - reg = of_get_property(np, "reg", &psize); - if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) - return np; - - /* Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. */ - if (!strcmp(np->name, "multifunc-device")) { - cnp = scan_OF_for_pci_dev(np, devfn); - if (cnp) - return cnp; - } - } - return NULL; -} - - -static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) -{ - struct device_node *parent, *np; - - /* Are we a root bus ? */ - if (bus->self == NULL || bus->parent == NULL) { - struct pci_controller *hose = pci_bus_to_host(bus); - if (hose == NULL) - return NULL; - return of_node_get(hose->dn); - } - - /* not a root bus, we need to get our parent */ - parent = scan_OF_for_pci_bus(bus->parent); - if (parent == NULL) - return NULL; - - /* now iterate for children for a match */ - np = scan_OF_for_pci_dev(parent, bus->self->devfn); - of_node_put(parent); - - return np; -} - -/* - * Scans the OF tree for a device node matching a PCI device - */ -struct device_node * -pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) -{ - struct device_node *parent, *np; - - pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); - parent = scan_OF_for_pci_bus(bus); - if (parent == NULL) - return NULL; - pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); - np = scan_OF_for_pci_dev(parent, devfn); - of_node_put(parent); - pr_debug(" result is %s\n", np ? 
np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node *node, void *data)
-{
- return ((void *)node == data);
-}
-
-/*
- * Returns the PCI device matching a given OF node
- */
-int
-pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
-{
- const unsigned int *reg;
- struct pci_controller *hose;
- struct pci_dev *dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
- return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
- return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
-
- /* Ok, here we need some tweak. If we have already renumbered
- * all busses, we can't rely on the OF bus number any more.
- * the pci_to_OF_bus_map is not enough as several PCI busses
- * may match the same OF bus number.
- */
- if (!pci_to_OF_bus_map)
- return 0;
-
- for_each_pci_dev(dev)
- if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
- dev->devfn == *devfn) {
- *bus = dev->bus->number;
- pci_dev_put(dev);
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL(pci_device_from_OF_node);
-
-/* We create the "pci-OF-bus-map" property now so it appears in the
- * /proc device tree
- */
-void __init
-pci_create_OF_bus_map(void)
-{
- struct property *of_prop;
- struct device_node *dn;
-
- of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
- 256);
- if (!of_prop)
- return;
- dn = of_find_node_by_path("/");
- if (dn) {
- memset(of_prop, -1, sizeof(struct property) + 256);
- of_prop->name = "pci-OF-bus-map";
- of_prop->length = 256;
- of_prop->value = &of_prop[1];
- prom_add_property(dn, of_prop);
- of_node_put(dn);
- }
-}
-
-static void __devinit pcibios_scan_phb(struct pci_controller *hose)
-{
- struct pci_bus *bus;
- struct device_node *node = hose->dn;
- unsigned long io_offset;
- struct resource *res = &hose->io_resource;
-
- pr_debug("PCI: Scanning PHB %s\n",
- node ? node->full_name : "<NO NAME>");
-
- /* Create an empty bus for the toplevel */
- bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
- if (bus == NULL) {
- printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
- hose->global_number);
- return;
- }
- bus->dev.of_node = of_node_get(node);
- bus->secondary = hose->first_busno;
- hose->bus = bus;
-
- /* Fixup IO space offset */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
-
- /* Wire up PHB bus resources */
- pcibios_setup_phb_resources(hose);
-
- /* Scan children */
- hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
-}
-
-static int __init pcibios_init(void)
-{
- struct pci_controller *hose, *tmp;
- int next_busno = 0;
-
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
-
- if (pci_flags & PCI_REASSIGN_ALL_BUS) {
- printk(KERN_INFO "setting pci_asign_all_busses\n");
- pci_assign_all_buses = 1;
- }
-
- /* Scan all of the recorded PCI controllers. 
*/ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - if (pci_assign_all_buses) - hose->first_busno = next_busno; - hose->last_busno = 0xff; - pcibios_scan_phb(hose); - printk(KERN_INFO "calling pci_bus_add_devices()\n"); - pci_bus_add_devices(hose->bus); - if (pci_assign_all_buses || next_busno <= hose->last_busno) - next_busno = hose->last_busno + \ - pcibios_assign_bus_offset; - } - pci_bus_count = next_busno; - - /* OpenFirmware based machines need a map of OF bus - * numbers vs. kernel bus numbers since we may have to - * remap them. - */ - if (pci_assign_all_buses) - pcibios_make_OF_bus_map(); - - /* Call common code to handle resource allocation */ - pcibios_resource_survey(); - - return 0; -} - -subsys_initcall(pcibios_init); - -static struct pci_controller* -pci_bus_to_hose(int bus) -{ - struct pci_controller *hose, *tmp; - - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) - if (bus >= hose->first_busno && bus <= hose->last_busno) - return hose; - return NULL; -} - -/* Provide information on locations of various I/O regions in physical - * memory. Do this on a per-card basis so that we choose the right - * root bridge. - * Note that the returned IO or memory base is a physical address - */ - -long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) -{ - struct pci_controller *hose; - long result = -EOPNOTSUPP; - - hose = pci_bus_to_hose(bus); - if (!hose) - return -ENODEV; - - switch (which) { - case IOBASE_BRIDGE_NUMBER: - return (long)hose->first_busno; - case IOBASE_MEMORY: - return (long)hose->pci_mem_offset; - case IOBASE_IO: - return (long)hose->io_base_phys; - case IOBASE_ISA_IO: - return (long)isa_io_base; - case IOBASE_ISA_MEM: - return (long)isa_mem_base; - } - - return result; -} diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 0bf82818aa5..780ee2c2a2a 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -7,6 +7,10 @@ extern int raw_show_trace; extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, unsigned long pc, unsigned long *ra); +extern unsigned long unwind_stack_by_address(unsigned long stack_page, + unsigned long *sp, + unsigned long pc, + unsigned long *ra); #else #define raw_show_trace 1 static inline unsigned long unwind_stack(struct task_struct *task, diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index a8244854d3d..d0deaab9ace 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, 0, data, regs)) + if (perf_event_overflow(event, data, regs)) mipspmu->disable_event(idx); } diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 75266ff4cc3..e5ad09a9baf 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { 
UNSUPPORTED_PERF_EVENT_ID }, + }, +}, }; /* 74K core has completely different cache event map. */ @@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, }; #ifdef CONFIG_MIPS_MT_SMP diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d2112d3cf11..c28fbe6107b 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk) #ifdef CONFIG_KALLSYMS -/* used by show_backtrace() */ -unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, - unsigned long pc, unsigned long *ra) +/* generic stack unwinding function */ +unsigned long notrace unwind_stack_by_address(unsigned long stack_page, + unsigned long *sp, + unsigned long pc, + unsigned long *ra) { - unsigned long stack_page; struct mips_frame_info info; unsigned long size, ofs; int leaf; extern void ret_from_irq(void); extern void ret_from_exception(void); - stack_page = (unsigned long)task_stack_page(task); if (!stack_page) return 0; @@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, *ra = 0; return __kernel_text_address(pc) ? pc : 0; } +EXPORT_SYMBOL(unwind_stack_by_address); + +/* used by show_backtrace() */ +unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, + unsigned long pc, unsigned long *ra) +{ + unsigned long stack_page = (unsigned long)task_stack_page(task); + return unwind_stack_by_address(stack_page, sp, pc, ra); +} #endif /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e9b3af27d84..b7517e3abc8 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == LL) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return simulate_ll(regs, opcode); } if ((opcode & OPCODE) == SC) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return simulate_sc(regs, opcode); } @@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); switch (rd) { case 0: /* CPU number */ regs->regs[rt] = smp_processor_id(); @@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return 0; } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index cfea1adfa15..eb319b58035 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, unsigned long value; unsigned int res; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); /* * This load never faults. 
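The hunks above show the conversion that recurs throughout this series: the software-event path loses its nmi argument, so perf_sw_event() is now called as perf_sw_event(event_id, nr, regs, addr). A minimal sketch of the resulting convention, mirroring the fault-path calls in this patch; the helper itself is illustrative, not part of the series:

#include <linux/perf_event.h>

/* Sketch of the post-series software-event convention used in the fault
 * paths above: no 'nmi' flag; NMI context is handled inside the perf core.
 * This helper is illustrative and not part of the patch. */
static void count_page_fault(struct pt_regs *regs, unsigned long addr,
			     int major)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
}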
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs) mm_segment_t seg; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, - 1, 0, regs, regs->cp0_badvaddr); + 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? * Or are we running in MIPS16 mode? diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index d32cb050311..dbf2f93a509 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } emul: - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, xcp, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0); MIPS_FPU_EMU_INC_STATS(emulated); switch (MIPSInst_OPCODE(ir)) { case ldc1_op:{ diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 137ee76a004..937cf336816 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -145,7 +145,7 @@ good_area: * the fault. */ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -154,12 +154,10 @@ good_area: BUG(); } if (fault & VM_FAULT_MAJOR) { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); tsk->maj_flt++; } else { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); tsk->min_flt++; } diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile index 4b9d7044e26..29f2f13eb31 100644 --- a/arch/mips/oprofile/Makefile +++ b/arch/mips/oprofile/Makefile @@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprofilefs.o oprofile_stats.o \ timer_int.o ) -oprofile-y := $(DRIVER_OBJS) common.o +oprofile-y := $(DRIVER_OBJS) common.o backtrace.o oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c new file mode 100644 index 00000000000..6854ed5097d --- /dev/null +++ b/arch/mips/oprofile/backtrace.c @@ -0,0 +1,175 @@ +#include <linux/oprofile.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/stacktrace.h> +#include <linux/stacktrace.h> +#include <linux/kernel.h> +#include <asm/sections.h> +#include <asm/inst.h> + +struct stackframe { + unsigned long sp; + unsigned long pc; + unsigned long ra; +}; + +static inline int get_mem(unsigned long addr, unsigned long *result) +{ + unsigned long *address = (unsigned long *) addr; + if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) + return -1; + if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) + return -3; + return 0; +} + +/* + * These two instruction helpers were taken from process.c + */ +static inline int is_ra_save_ins(union mips_instruction *ip) +{ + /* sw / sd $ra, offset($sp) */ + return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) + && ip->i_format.rs == 29 && ip->i_format.rt == 31; +} + +static inline int is_sp_move_ins(union mips_instruction *ip) +{ + /* addiu/daddiu sp,sp,-imm */ + if (ip->i_format.rs != 29 || ip->i_format.rt != 29) + return 0; + if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) + return 1; + return 0; +} + +/* + 
* Looks for specific instructions that mark the end of a function. + * This usually means we ran into the code area of the previous function. + */ +static inline int is_end_of_function_marker(union mips_instruction *ip) +{ + /* jr ra */ + if (ip->r_format.func == jr_op && ip->r_format.rs == 31) + return 1; + /* lui gp */ + if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28) + return 1; + return 0; +} + +/* + * TODO for userspace stack unwinding: + * - handle cases where the stack is adjusted inside a function + * (generally doesn't happen) + * - find optimal value for max_instr_check + * - try to find a way to handle leaf functions + */ + +static inline int unwind_user_frame(struct stackframe *old_frame, + const unsigned int max_instr_check) +{ + struct stackframe new_frame = *old_frame; + off_t ra_offset = 0; + size_t stack_size = 0; + unsigned long addr; + + if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0) + return -9; + + for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc) + && (!ra_offset || !stack_size); --addr) { + union mips_instruction ip; + + if (get_mem(addr, (unsigned long *) &ip)) + return -11; + + if (is_sp_move_ins(&ip)) { + int stack_adjustment = ip.i_format.simmediate; + if (stack_adjustment > 0) + /* This marks the end of the previous function, + which means we overran. */ + break; + stack_size = (unsigned) stack_adjustment; + } else if (is_ra_save_ins(&ip)) { + int ra_slot = ip.i_format.simmediate; + if (ra_slot < 0) + /* This shouldn't happen. */ + break; + ra_offset = ra_slot; + } else if (is_end_of_function_marker(&ip)) + break; + } + + if (!ra_offset || !stack_size) + return -1; + + if (ra_offset) { + new_frame.ra = old_frame->sp + ra_offset; + if (get_mem(new_frame.ra, &(new_frame.ra))) + return -13; + } + + if (stack_size) { + new_frame.sp = old_frame->sp + stack_size; + if (get_mem(new_frame.sp, &(new_frame.sp))) + return -14; + } + + if (new_frame.sp > old_frame->sp) + return -2; + + new_frame.pc = old_frame->ra; + *old_frame = new_frame; + + return 0; +} + +static inline void do_user_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) +{ + const unsigned int max_instr_check = 512; + const unsigned long high_addr = low_addr + THREAD_SIZE; + + while (depth-- && !unwind_user_frame(frame, max_instr_check)) { + oprofile_add_trace(frame->ra); + if (frame->sp < low_addr || frame->sp > high_addr) + break; + } +} + +#ifndef CONFIG_KALLSYMS +static inline void do_kernel_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) { } +#else +static inline void do_kernel_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) +{ + while (depth-- && frame->pc) { + frame->pc = unwind_stack_by_address(low_addr, + &(frame->sp), + frame->pc, + &(frame->ra)); + oprofile_add_trace(frame->ra); + } +} +#endif + +void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth) +{ + struct stackframe frame = { .sp = regs->regs[29], + .pc = regs->cp0_epc, + .ra = regs->regs[31] }; + const int userspace = user_mode(regs); + const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE); + + if (userspace) + do_user_backtrace(low_addr, &frame, depth); + else + do_kernel_backtrace(low_addr, &frame, depth); +} diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index f9eb1aba634..d1f2d4c52d4 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c @@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct 
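A minimal user-space sketch of the decode that is_sp_move_ins() and is_ra_save_ins() perform, using the classic o32 prologue words (addiu sp,sp,-32 encodes as 0x27bdffe0; sw ra,28(sp) as 0xafbf001c); explicit shifts stand in for the kernel's union mips_instruction, and the two-word buffer is a made-up prologue, not real program text:

#include <stdint.h>
#include <stdio.h>

/* MIPS I-format: opcode[31:26] rs[25:21] rt[20:16] simm[15:0] */
static int opcode(uint32_t w)   { return w >> 26; }
static int rs(uint32_t w)       { return (w >> 21) & 0x1f; }
static int rt(uint32_t w)       { return (w >> 16) & 0x1f; }
static int16_t simm(uint32_t w) { return (int16_t)(w & 0xffff); }

int main(void)
{
	const uint32_t prologue[] = {
		0x27bdffe0,	/* addiu sp,sp,-32 : opcode 9, rs = rt = 29 */
		0xafbf001c,	/* sw ra,28(sp)    : opcode 43, rs = 29, rt = 31 */
	};
	size_t stack_size = 0;
	long ra_offset = -1;

	for (unsigned int i = 0; i < 2; i++) {
		uint32_t w = prologue[i];

		if (opcode(w) == 9 && rs(w) == 29 && rt(w) == 29 && simm(w) < 0)
			stack_size = (size_t)-simm(w);	/* 32-byte frame */
		else if (opcode(w) == 43 && rs(w) == 29 && rt(w) == 31)
			ra_offset = simm(w);		/* $ra saved at sp+28 */
	}
	printf("frame=%zu ra@sp+%ld\n", stack_size, ra_offset);
	return 0;
}

unwind_user_frame() runs this same match backwards from pc, then fetches the saved ra and the caller's sp from user memory via get_mem().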
oprofile_operations *ops) ops->start = op_mips_start; ops->stop = op_mips_stop; ops->cpu_type = lmodel->cpu_type; + ops->backtrace = op_mips_backtrace; printk(KERN_INFO "oprofile: using %s performance monitoring.\n", lmodel->cpu_type); diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h index f04b54fb37d..7c2da27ece0 100644 --- a/arch/mips/oprofile/op_impl.h +++ b/arch/mips/oprofile/op_impl.h @@ -36,4 +36,6 @@ struct op_mips_model { unsigned char num_counters; }; +void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth); + #endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2729c6663d8..cdf7a0a6440 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -134,6 +134,7 @@ config PPC select GENERIC_IRQ_SHOW_LEVEL select HAVE_RCU_TABLE_FREE if SMP select HAVE_SYSCALL_TRACEPOINTS + select HAVE_BPF_JIT if (PPC64 && NET) config EARLY_PRINTK bool diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index b7212b619c5..b94740f36b1 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \ arch/powerpc/lib/ \ arch/powerpc/sysdev/ \ arch/powerpc/platforms/ \ - arch/powerpc/math-emu/ + arch/powerpc/math-emu/ \ + arch/powerpc/net/ core-$(CONFIG_XMON) += arch/powerpc/xmon/ core-$(CONFIG_KVM) += arch/powerpc/kvm/ diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 45921672b97..2cc41c715d2 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type); #define PPC_WARN_EMULATED(type, regs) \ do { \ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ - 1, 0, regs, 0); \ + 1, regs, 0); \ __PPC_WARN_EMULATED(type); \ } while (0) #define PPC_WARN_ALIGNMENT(type, regs) \ do { \ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ - 1, 0, regs, regs->dar); \ + 1, regs, regs->dar); \ __PPC_WARN_EMULATED(type); \ } while (0) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 1c33ec17ca3..80fd4d2b4a6 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); extern struct pmu perf_ops_bp; -extern void ptrace_triggered(struct perf_event *bp, int nmi, +extern void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs); static inline void hw_breakpoint_disable(void) { diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index b90dbf8e5cd..90bd3ed4816 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -171,15 +171,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) #ifndef CONFIG_PPC64 -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - struct pci_controller *host; - - if (bus->self) - return pci_device_to_OF_node(bus->self); - host = pci_bus_to_host(bus); - return host ? 
host->dn : NULL; -} +extern int pci_device_from_OF_node(struct device_node *node, + u8 *bus, u8 *devfn); +extern void pci_create_OF_bus_map(void); static inline int isa_vaddr_is_ioport(void __iomem *address) { @@ -223,17 +217,8 @@ struct pci_dn { /* Get the pointer to a device_node's pci_dn */ #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) -extern struct device_node *fetch_dev_dn(struct pci_dev *dev); extern void * update_dn_pci_info(struct device_node *dn, void *data); -/* Get a device_node from a pci_dev. This code must be fast except - * in the case where the sysdata is incorrect and needs to be fixed - * up (this will only happen once). */ -static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) -{ - return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev); -} - static inline int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn) { @@ -244,14 +229,6 @@ static inline int pci_device_from_OF_node(struct device_node *np, return 0; } -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - if (bus->self) - return pci_device_to_OF_node(bus->self); - else - return bus->dev.of_node; /* Must be root bus (PHB) */ -} - /** Find the bus corresponding to the indicated device node */ extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 7d7790954e0..1f522680ea1 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb); extern struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_bus *bus, int devfn); -extern void of_scan_pci_bridge(struct device_node *node, - struct pci_dev *dev); +extern void of_scan_pci_bridge(struct pci_dev *dev); extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index e472659d906..e980faae422 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -71,6 +71,42 @@ #define PPC_INST_ERATSX 0x7c000126 #define PPC_INST_ERATSX_DOT 0x7c000127 +/* Misc instructions for BPF compiler */ +#define PPC_INST_LD 0xe8000000 +#define PPC_INST_LHZ 0xa0000000 +#define PPC_INST_LWZ 0x80000000 +#define PPC_INST_STD 0xf8000000 +#define PPC_INST_STDU 0xf8000001 +#define PPC_INST_MFLR 0x7c0802a6 +#define PPC_INST_MTLR 0x7c0803a6 +#define PPC_INST_CMPWI 0x2c000000 +#define PPC_INST_CMPDI 0x2c200000 +#define PPC_INST_CMPLW 0x7c000040 +#define PPC_INST_CMPLWI 0x28000000 +#define PPC_INST_ADDI 0x38000000 +#define PPC_INST_ADDIS 0x3c000000 +#define PPC_INST_ADD 0x7c000214 +#define PPC_INST_SUB 0x7c000050 +#define PPC_INST_BLR 0x4e800020 +#define PPC_INST_BLRL 0x4e800021 +#define PPC_INST_MULLW 0x7c0001d6 +#define PPC_INST_MULHWU 0x7c000016 +#define PPC_INST_MULLI 0x1c000000 +#define PPC_INST_DIVWU 0x7c0003d6 +#define PPC_INST_RLWINM 0x54000000 +#define PPC_INST_RLDICR 0x78000004 +#define PPC_INST_SLW 0x7c000030 +#define PPC_INST_SRW 0x7c000430 +#define PPC_INST_AND 0x7c000038 +#define PPC_INST_ANDDOT 0x7c000039 +#define PPC_INST_OR 0x7c000378 +#define PPC_INST_ANDI 0x70000000 +#define PPC_INST_ORI 0x60000000 +#define PPC_INST_ORIS 0x64000000 +#define PPC_INST_NEG 0x7c0000d0 +#define PPC_INST_BRANCH 0x48000000 +#define PPC_INST_BRANCH_COND 0x40800000 + /* macros to insert fields into opcodes */ #define 
__PPC_RA(a) (((a) & 0x1f) << 16) #define __PPC_RB(b) (((b) & 0x1f) << 11) @@ -83,6 +119,10 @@ #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) #define __PPC_WS(w) (((w) & 0x1f) << 11) +#define __PPC_SH(s) __PPC_WS(s) +#define __PPC_MB(s) (((s) & 0x1f) << 6) +#define __PPC_ME(s) (((s) & 0x1f) << 1) +#define __PPC_BI(s) (((s) & 0x1f) << 16) /* * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index c189aa5fe1f..b823536375d 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -22,20 +22,6 @@ #define HAVE_ARCH_DEVTREE_FIXUPS -#ifdef CONFIG_PPC32 -/* - * PCI <-> OF matching functions - * (XXX should these be here?) - */ -struct pci_bus; -struct pci_dev; -extern int pci_device_from_OF_node(struct device_node *node, - u8* bus, u8* devfn); -extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int); -extern struct device_node* pci_device_to_OF_node(struct pci_dev *); -extern void pci_create_OF_bus_map(void); -#endif - /* * OF address retreival & translation */ diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c index b150b510510..cb2e2949c8d 100644 --- a/arch/powerpc/kernel/e500-pmu.c +++ b/arch/powerpc/kernel/e500-pmu.c @@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static int num_events = 128; diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index 2cc5e0301d0..845a5847889 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c @@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; struct power_pmu mpc7450_pmu = { diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 893af2a9cd0..a3c92770e42 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) if (dev->is_added) continue; - /* Setup OF node pointer in the device */ - dev->dev.of_node = pci_device_to_OF_node(dev); - /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ @@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return of_node_get(hose->dn); +} + /** * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure @@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) hose->global_number); return; } - bus->dev.of_node = of_node_get(node); bus->secondary = hose->first_busno; hose->bus = bus; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index bedb370459f..86585508e9c 100644 --- a/arch/powerpc/kernel/pci_32.c +++ 
b/arch/powerpc/kernel/pci_32.c @@ -167,150 +167,26 @@ pcibios_make_OF_bus_map(void) #endif } -typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); - -static struct device_node* -scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data) -{ - struct device_node *node; - struct device_node* sub_node; - - for_each_child_of_node(parent, node) { - const unsigned int *class_code; - - if (filter(node, data)) { - of_node_put(node); - return node; - } - - /* For PCI<->PCI bridges or CardBus bridges, we go down - * Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. - */ - class_code = of_get_property(node, "class-code", NULL); - if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && - strcmp(node->name, "multifunc-device")) - continue; - sub_node = scan_OF_pci_childs(node, filter, data); - if (sub_node) { - of_node_put(node); - return sub_node; - } - } - return NULL; -} - -static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, - unsigned int devfn) -{ - struct device_node *np, *cnp; - const u32 *reg; - unsigned int psize; - - for_each_child_of_node(parent, np) { - reg = of_get_property(np, "reg", &psize); - if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) - return np; - - /* Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. */ - if (!strcmp(np->name, "multifunc-device")) { - cnp = scan_OF_for_pci_dev(np, devfn); - if (cnp) - return cnp; - } - } - return NULL; -} - - -static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) -{ - struct device_node *parent, *np; - - /* Are we a root bus ? */ - if (bus->self == NULL || bus->parent == NULL) { - struct pci_controller *hose = pci_bus_to_host(bus); - if (hose == NULL) - return NULL; - return of_node_get(hose->dn); - } - - /* not a root bus, we need to get our parent */ - parent = scan_OF_for_pci_bus(bus->parent); - if (parent == NULL) - return NULL; - - /* now iterate for children for a match */ - np = scan_OF_for_pci_dev(parent, bus->self->devfn); - of_node_put(parent); - - return np; -} - -/* - * Scans the OF tree for a device node matching a PCI device - */ -struct device_node * -pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) -{ - struct device_node *parent, *np; - - pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); - parent = scan_OF_for_pci_bus(bus); - if (parent == NULL) - return NULL; - pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); - np = scan_OF_for_pci_dev(parent, devfn); - of_node_put(parent); - pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); - - /* XXX most callers don't release the returned node - * mostly because ppc64 doesn't increase the refcount, - * we need to fix that. 
- */ - return np; -} -EXPORT_SYMBOL(pci_busdev_to_OF_node); - -struct device_node* -pci_device_to_OF_node(struct pci_dev *dev) -{ - return pci_busdev_to_OF_node(dev->bus, dev->devfn); -} -EXPORT_SYMBOL(pci_device_to_OF_node); - -static int -find_OF_pci_device_filter(struct device_node* node, void* data) -{ - return ((void *)node == data); -} /* * Returns the PCI device matching a given OF node */ -int -pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) +int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) { - const unsigned int *reg; - struct pci_controller* hose; - struct pci_dev* dev = NULL; - - /* Make sure it's really a PCI device */ - hose = pci_find_hose_for_OF_device(node); - if (!hose || !hose->dn) - return -ENODEV; - if (!scan_OF_pci_childs(hose->dn, - find_OF_pci_device_filter, (void *)node)) + struct pci_dev *dev = NULL; + const __be32 *reg; + int size; + + /* Check if it might have a chance to be a PCI device */ + if (!pci_find_hose_for_OF_device(node)) return -ENODEV; - reg = of_get_property(node, "reg", NULL); - if (!reg) + + reg = of_get_property(node, "reg", &size); + if (!reg || size < 5 * sizeof(u32)) return -ENODEV; - *bus = (reg[0] >> 16) & 0xff; - *devfn = ((reg[0] >> 8) & 0xff); + + *bus = (be32_to_cpup(®[0]) >> 16) & 0xff; + *devfn = (be32_to_cpup(®[0]) >> 8) & 0xff; /* Ok, here we need some tweak. If we have already renumbered * all busses, we can't rely on the OF bus number any more. diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 6baabc13306..478f8d78716 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb) traverse_pci_devices(dn, update_dn_pci_info, phb); } -/* - * Traversal func that looks for a <busno,devfcn> value. - * If found, the pci_dn is returned (thus terminating the traversal). - */ -static void *is_devfn_node(struct device_node *dn, void *data) -{ - int busno = ((unsigned long)data >> 8) & 0xff; - int devfn = ((unsigned long)data) & 0xff; - struct pci_dn *pci = dn->data; - - if (pci && (devfn == pci->devfn) && (busno == pci->busno)) - return dn; - return NULL; -} - -/* - * This is the "slow" path for looking up a device_node from a - * pci_dev. It will hunt for the device under its parent's - * phb and then update of_node pointer. - * - * It may also do fixups on the actual device since this happens - * on the first read/write. - * - * Note that it also must deal with devices that don't exist. - * In this case it may probe for real hardware ("just in case") - * and add a device_node to the device tree if necessary. - * - * Is this function necessary anymore now that dev->dev.of_node is - * used to store the node pointer? - * - */ -struct device_node *fetch_dev_dn(struct pci_dev *dev) -{ - struct pci_controller *phb = dev->sysdata; - struct device_node *dn; - unsigned long searchval = (dev->bus->number << 8) | dev->devfn; - - if (WARN_ON(!phb)) - return NULL; - - dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval); - if (dn) - dev->dev.of_node = dn; - return dn; -} -EXPORT_SYMBOL(fetch_dev_dn); - /** * pci_devs_phb_init - Initialize phbs and pci devs under them. 
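A standalone sketch of the phys.hi decode used just above, assuming the standard OF PCI "reg" encoding (bus number in bits 16-23, devfn in bits 8-15); the sample cell value is hypothetical:

#include <stdint.h>
#include <stdio.h>

static void decode_phys_hi(uint32_t phys_hi)
{
	/* Same shifts as pci_device_from_OF_node() above. */
	uint8_t bus   = (phys_hi >> 16) & 0xff;
	uint8_t devfn = (phys_hi >> 8) & 0xff;

	printf("bus %u dev %u fn %u\n", bus, devfn >> 3, devfn & 7);
}

int main(void)
{
	decode_phys_hi(0x00021900);	/* hypothetical: bus 2, dev 3, fn 1 */
	return 0;
}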
* diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 1e89a72fd03..fe0a5ad6f73 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev); * this routine in turn call of_scan_bus() recusively to scan for more child * devices. */ -void __devinit of_scan_pci_bridge(struct device_node *node, - struct pci_dev *dev) +void __devinit of_scan_pci_bridge(struct pci_dev *dev) { + struct device_node *node = dev->dev.of_node; struct pci_bus *bus; const u32 *busrange, *ranges; int len, i, mode; @@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node, bus->primary = dev->bus->number; bus->subordinate = busrange[1]; bus->bridge_ctl = 0; - bus->dev.of_node = of_node_get(node); /* parse ranges property */ /* PCI #address-cells == 3 and #size-cells == 2 always */ @@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node, list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { - struct device_node *child = pci_device_to_OF_node(dev); - if (child) - of_scan_pci_bridge(child, dev); + of_scan_pci_bridge(dev); } } } diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 822f63008ae..14967de9887 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1207,7 +1207,7 @@ struct pmu power_pmu = { * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, - struct pt_regs *regs, int nmi) + struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; @@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); - if (perf_event_overflow(event, nmi, &data, regs)) + if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); } } @@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if ((int)val < 0) { /* event has overflowed */ found = 1; - record_and_restart(event, val, regs, nmi); + record_and_restart(event, val, regs); } } diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index b0dc8f7069c..0a6d2a9d569 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = { * here so there is no possibility of being interrupted. 
*/ static void record_and_restart(struct perf_event *event, unsigned long val, - struct pt_regs *regs, int nmi) + struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; @@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, perf_sample_data_init(&data, 0); data.period = event->hw.last_period; - if (perf_event_overflow(event, nmi, &data, regs)) + if (perf_event_overflow(event, &data, regs)) fsl_emb_pmu_stop(event, 0); } } @@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (event) { /* event has overflowed */ found = 1; - record_and_restart(event, val, regs, nmi); + record_and_restart(event, val, regs); } else { /* * Disabled counter is negative, diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index ead8b3c2649..e9dbc2d35c9 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power4_pmu = { diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index eca0ac595cb..f58a2bd41b5 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power5p_pmu = { diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index d5ff0f64a5e..b1acab68414 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power5_pmu = { diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 31603927e37..b24a3a23d07 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power6_pmu = { diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 593740fcb79..6d9dccb2ea5 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power7_pmu = { diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 9a6e093858f..b121de9658e 
100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu ppc970_pmu = { diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cb22024f2b4..05b7dd217f6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task) } #ifdef CONFIG_HAVE_HW_BREAKPOINT -void ptrace_triggered(struct perf_event *bp, int nmi, +void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; @@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, &attr.bp_type); thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, - ptrace_triggered, task); + ptrace_triggered, NULL, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; ptrace_put_breakpoints(task); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f33acfd872a..03b29a6759a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending); #endif /* 32 vs 64 bit */ -void set_irq_work_pending(void) +void arch_irq_work_raise(void) { preempt_disable(); set_irq_work_pending_flag(); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index ad35f66c69e..5efe8c96d37 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -174,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -320,7 +320,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { @@ -331,7 +331,7 @@ good_area: #endif } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile new file mode 100644 index 00000000000..266b3950c3a --- /dev/null +++ b/arch/powerpc/net/Makefile @@ -0,0 +1,4 @@ +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h new file mode 100644 index 00000000000..af1ab5e9a69 --- /dev/null +++ b/arch/powerpc/net/bpf_jit.h @@ -0,0 +1,227 @@ +/* bpf_jit.h: BPF JIT compiler for PPC64 + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +#define BPF_PPC_STACK_LOCALS 32 +#define BPF_PPC_STACK_BASIC (48+64) +#define BPF_PPC_STACK_SAVE (18*8) +#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ + BPF_PPC_STACK_SAVE) +#define BPF_PPC_SLOWPATH_FRAME (48+64) + +/* + * Generated code register usage: + * + * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: + * + * skb r3 (Entry parameter) + * A register r4 + * X register r5 + * addr param r6 + * r7-r10 scratch + * skb->data r14 + * skb headlen r15 (skb->len - skb->data_len) + * m[0] r16 + * m[...] ... + * m[15] r31 + */ +#define r_skb 3 +#define r_ret 3 +#define r_A 4 +#define r_X 5 +#define r_addr 6 +#define r_scratch1 7 +#define r_D 14 +#define r_HL 15 +#define r_M 16 + +#ifndef __ASSEMBLY__ + +/* + * Assembly helpers from arch/powerpc/net/bpf_jit.S: + */ +extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; + +#define FUNCTION_DESCR_SIZE 24 + +/* + * 16-bit immediate helper macros: HA() is for use with sign-extending instrs + * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the + * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000). + */ +#define IMM_H(i) ((uintptr_t)(i)>>16) +#define IMM_HA(i) (((uintptr_t)(i)>>16) + \ + (((uintptr_t)(i) & 0x8000) >> 15)) +#define IMM_L(i) ((uintptr_t)(i) & 0xffff) + +#define PLANT_INSTR(d, idx, instr) \ + do { if (d) { (d)[idx] = instr; } idx++; } while (0) +#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) + +#define PPC_NOP() EMIT(PPC_INST_NOP) +#define PPC_BLR() EMIT(PPC_INST_BLR) +#define PPC_BLRL() EMIT(PPC_INST_BLRL) +#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r)) +#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \ + __PPC_RA(a) | IMM_L(i)) +#define PPC_MR(d, a) PPC_OR(d, a, a) +#define PPC_LI(r, i) PPC_ADDI(r, 0, i) +#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \ + __PPC_RS(d) | __PPC_RA(a) | IMM_L(i)) +#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) +#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \ + __PPC_RA(base) | ((i) & 0xfffc)) + +#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +/* Convenience helpers for the above with 'far' offsets: */ +#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LD(r, r, IMM_L(i)); } } while(0) + +#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LWZ(r, r, IMM_L(i)); } } while(0) + +#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LHZ(r, r, IMM_L(i)); } } while(0) + +#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b)) + +#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \ + __PPC_RB(a) | __PPC_RA(b)) +#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \ + 
__PPC_RA(a) | __PPC_RB(b)) +#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \ + __PPC_RA(a) | IMM_L(i)) +#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(s)) +#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(s)) +/* slwi = rlwinm Rx, Ry, n, 0, 31-n */ +#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(i) | \ + __PPC_MB(0) | __PPC_ME(31-(i))) +/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */ +#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(32-(i)) | \ + __PPC_MB(i) | __PPC_ME(31)) +/* sldi = rldicr Rx, Ry, n, 63-n */ +#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(i) | \ + __PPC_MB(63-(i)) | (((i) & 0x20) >> 4)) +#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a)) + +/* Long jump; (unconditional 'branch') */ +#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ + (((dest) - (ctx->idx * 4)) & 0x03fffffc)) +/* "cond" here covers BO:BI fields. */ +#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \ + (((cond) & 0x3ff) << 16) | \ + (((dest) - (ctx->idx * 4)) & \ + 0xfffc)) +#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \ + if ((u32)(uintptr_t)(i) >= 32768) { \ + PPC_ADDIS(d, d, IMM_HA(i)); \ + } } while(0) +#define PPC_LI64(d, i) do { \ + if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \ + PPC_LI32(d, i); \ + else { \ + PPC_LIS(d, ((uintptr_t)(i) >> 48)); \ + if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \ + PPC_ORI(d, d, \ + ((uintptr_t)(i) >> 32) & 0xffff); \ + PPC_SLDI(d, d, 32); \ + if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \ + PPC_ORIS(d, d, \ + ((uintptr_t)(i) >> 16) & 0xffff); \ + if ((uintptr_t)(i) & 0x000000000000ffffULL) \ + PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ + } } while (0); + +static inline bool is_nearbranch(int offset) +{ + return (offset < 32768) && (offset >= -32768); +} + +/* + * The fly in the ointment of code size changing from pass to pass is + * avoided by padding the short branch case with a NOP. If code size differs + * with different branch reaches we will have the issue of code moving from + * one pass to the next and will need a few passes to converge on a stable + * state. + */ +#define PPC_BCC(cond, dest) do { \ + if (is_nearbranch((dest) - (ctx->idx * 4))) { \ + PPC_BCC_SHORT(cond, dest); \ + PPC_NOP(); \ + } else { \ + /* Flip the 'T or F' bit to invert comparison */ \ + PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \ + PPC_JMP(dest); \ + } } while(0) + +/* To create a branch condition, select a bit of cr0... 
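A quick worked check of the IMM_HA() compensation that PPC_LI32/PPC_LI64 above depend on: the low half is consumed by a sign-extending addi, so a set bit 15 costs 0x10000, which IMM_HA pre-pays in the high half. A self-contained sketch using fixed 32-bit types in place of uintptr_t:

#include <assert.h>
#include <stdint.h>

#define IMM_H(i)	((uint32_t)(i) >> 16)
#define IMM_HA(i)	(((uint32_t)(i) >> 16) + (((uint32_t)(i) & 0x8000) >> 15))
#define IMM_L(i)	((uint32_t)(i) & 0xffff)

int main(void)
{
	uint32_t i = 0x00018000;
	int32_t lo = (int16_t)IMM_L(i);			 /* sign-extends to -0x8000 */
	uint32_t sum = (IMM_HA(i) << 16) + (uint32_t)lo; /* addis-style high + low */

	assert(IMM_H(i) == 1 && IMM_HA(i) == 2);	/* high half bumped by one */
	assert(sum == i);				/* 0x20000 - 0x8000 == 0x18000 */
	return 0;
}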
*/ +#define CR0_LT 0 +#define CR0_GT 1 +#define CR0_EQ 2 +/* ...and modify BO[3] */ +#define COND_CMP_TRUE 0x100 +#define COND_CMP_FALSE 0x000 +/* Together, they make all required comparisons: */ +#define COND_GT (CR0_GT | COND_CMP_TRUE) +#define COND_GE (CR0_LT | COND_CMP_FALSE) +#define COND_EQ (CR0_EQ | COND_CMP_TRUE) +#define COND_NE (CR0_EQ | COND_CMP_FALSE) +#define COND_LT (CR0_LT | COND_CMP_TRUE) + +#define SEEN_DATAREF 0x10000 /* might call external helpers */ +#define SEEN_XREG 0x20000 /* X reg is used */ +#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary + * storage */ +#define SEEN_MEM_MSK 0x0ffff + +struct codegen_context { + unsigned int seen; + unsigned int idx; + int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ +}; + +#endif + +#endif diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S new file mode 100644 index 00000000000..ff4506e85cc --- /dev/null +++ b/arch/powerpc/net/bpf_jit_64.S @@ -0,0 +1,138 @@ +/* bpf_jit.S: Packet/header access helper functions + * for PPC64 BPF compiler. + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include <asm/ppc_asm.h> +#include "bpf_jit.h" + +/* + * All of these routines are called directly from generated code, + * whose register usage is: + * + * r3 skb + * r4,r5 A,X + * r6 *** address parameter to helper *** + * r7-r10 scratch + * r14 skb->data + * r15 skb headlen + * r16-31 M[] + */ + +/* + * To consider: These helpers are so small it could be better to just + * generate them inline. Inline code can do the simple headlen check + * then branch directly to slow_path_XXX if required. (In fact, could + * load a spare GPR with the address of slow_path_generic and pass size + * as an argument, making the call site a mtlr, li and bllr.) + * + * Technically, the "is addr < 0" check is unnecessary & slowing down + * the ABS path, as it's statically checked on generation. + */ + .globl sk_load_word +sk_load_word: + cmpdi r_addr, 0 + blt bpf_error + /* Are we accessing past headlen? */ + subi r_scratch1, r_HL, 4 + cmpd r_scratch1, r_addr + blt bpf_slow_path_word + /* Nope, just hitting the header. cr0 here is eq or gt! */ + lwzx r_A, r_D, r_addr + /* When big endian we don't need to byteswap. */ + blr /* Return success, cr0 != LT */ + + .globl sk_load_half +sk_load_half: + cmpdi r_addr, 0 + blt bpf_error + subi r_scratch1, r_HL, 2 + cmpd r_scratch1, r_addr + blt bpf_slow_path_half + lhzx r_A, r_D, r_addr + blr + + .globl sk_load_byte +sk_load_byte: + cmpdi r_addr, 0 + blt bpf_error + cmpd r_HL, r_addr + ble bpf_slow_path_byte + lbzx r_A, r_D, r_addr + blr + +/* + * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) + * r_addr is the offset value, already known positive + */ + .globl sk_load_byte_msh +sk_load_byte_msh: + cmpd r_HL, r_addr + ble bpf_slow_path_byte_msh + lbzx r_X, r_D, r_addr + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr + +bpf_error: + /* Entered with cr0 = lt */ + li r3, 0 + /* Generated code will 'blt epilogue', returning 0. */ + blr + +/* Call out to skb_copy_bits: + * We'll need to back up our volatile regs first; we have + * local variable space at r1+(BPF_PPC_STACK_BASIC). + * Allocate a new stack frame here to remain ABI-compliant in + * stashing LR. 
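The COND_* values just defined pack a cr0 bit (the BI field) together with the BO true/false sense, which is why PPC_BCC() can invert a whole comparison with a single `cond ^ COND_CMP_TRUE`. A tiny standalone check of that identity (COND_LE is added here for symmetry; the patch itself never needs it):

#include <assert.h>

#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000

#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)

int main(void)
{
	assert((COND_EQ ^ COND_CMP_TRUE) == COND_NE);	/* !(a == b) -> a != b */
	assert((COND_LT ^ COND_CMP_TRUE) == COND_GE);	/* !(a <  b) -> a >= b */
	assert((COND_GT ^ COND_CMP_TRUE) == COND_LE);	/* !(a >  b) -> a <= b */
	return 0;
}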
+ */ +#define bpf_slow_path_common(SIZE) \ + mflr r0; \ + std r0, 16(r1); \ + /* R3 goes in parameter space of caller's frame */ \ + std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \ + stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + /* R3 = r_skb, as passed */ \ + mr r4, r_addr; \ + li r6, SIZE; \ + bl skb_copy_bits; \ + /* R3 = 0 on success */ \ + addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ + ld r0, 16(r1); \ + ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + mtlr r0; \ + cmpdi r3, 0; \ + blt bpf_error; /* cr0 = LT */ \ + ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + /* Great success! */ + +bpf_slow_path_word: + bpf_slow_path_common(4) + /* Data value is on stack, and cr0 != LT */ + lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_half: + bpf_slow_path_common(2) + lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_byte: + bpf_slow_path_common(1) + lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_byte_msh: + bpf_slow_path_common(1) + lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c new file mode 100644 index 00000000000..73619d3aeb6 --- /dev/null +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -0,0 +1,694 @@ +/* bpf_jit_comp.c: BPF JIT compiler for PPC64 + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#include <linux/moduleloader.h> +#include <asm/cacheflush.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include "bpf_jit.h" + +#ifndef __BIG_ENDIAN +/* There are endianness assumptions herein. */ +#error "Little-endian PPC not supported in BPF compiler" +#endif + +int bpf_jit_enable __read_mostly; + + +static inline void bpf_flush_icache(void *start, void *end) +{ + smp_wmb(); + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, + struct codegen_context *ctx) +{ + int i; + const struct sock_filter *filter = fp->insns; + + if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { + /* Make stackframe */ + if (ctx->seen & SEEN_DATAREF) { + /* If we call any helpers (for loads), save LR */ + EMIT(PPC_INST_MFLR | __PPC_RT(0)); + PPC_STD(0, 1, 16); + + /* Back up non-volatile regs. */ + PPC_STD(r_D, 1, -(8*(32-r_D))); + PPC_STD(r_HL, 1, -(8*(32-r_HL))); + } + if (ctx->seen & SEEN_MEM) { + /* + * Conditionally save regs r15-r31 as some will be used + * for M[] data. 
+ */ + for (i = r_M; i < (r_M+16); i++) { + if (ctx->seen & (1 << (i-r_M))) + PPC_STD(i, 1, -(8*(32-i))); + } + } + EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) | + (-BPF_PPC_STACKFRAME & 0xfffc)); + } + + if (ctx->seen & SEEN_DATAREF) { + /* + * If this filter needs to access skb data, + * prepare r_D and r_HL: + * r_HL = skb->len - skb->data_len + * r_D = skb->data + */ + PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, + data_len)); + PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); + PPC_SUB(r_HL, r_HL, r_scratch1); + PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); + } + + if (ctx->seen & SEEN_XREG) { + /* + * TODO: Could also detect whether first instr. sets X and + * avoid this (as below, with A). + */ + PPC_LI(r_X, 0); + } + + switch (filter[0].code) { + case BPF_S_RET_K: + case BPF_S_LD_W_LEN: + case BPF_S_ANC_PROTOCOL: + case BPF_S_ANC_IFINDEX: + case BPF_S_ANC_MARK: + case BPF_S_ANC_RXHASH: + case BPF_S_ANC_CPU: + case BPF_S_ANC_QUEUE: + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: + /* first instruction sets A register (or is RET 'constant') */ + break; + default: + /* make sure we dont leak kernel information to user */ + PPC_LI(r_A, 0); + } +} + +static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +{ + int i; + + if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { + PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); + if (ctx->seen & SEEN_DATAREF) { + PPC_LD(0, 1, 16); + PPC_MTLR(0); + PPC_LD(r_D, 1, -(8*(32-r_D))); + PPC_LD(r_HL, 1, -(8*(32-r_HL))); + } + if (ctx->seen & SEEN_MEM) { + /* Restore any saved non-vol registers */ + for (i = r_M; i < (r_M+16); i++) { + if (ctx->seen & (1 << (i-r_M))) + PPC_LD(i, 1, -(8*(32-i))); + } + } + } + /* The RETs have left a return value in R3. */ + + PPC_BLR(); +} + +/* Assemble the body code between the prologue & epilogue. */ +static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, + struct codegen_context *ctx, + unsigned int *addrs) +{ + const struct sock_filter *filter = fp->insns; + int flen = fp->len; + u8 *func; + unsigned int true_cond; + int i; + + /* Start of epilogue code */ + unsigned int exit_addr = addrs[flen]; + + for (i = 0; i < flen; i++) { + unsigned int K = filter[i].k; + + /* + * addrs[] maps a BPF bytecode address into a real offset from + * the start of the body code. + */ + addrs[i] = ctx->idx * 4; + + switch (filter[i].code) { + /*** ALU ops ***/ + case BPF_S_ALU_ADD_X: /* A += X; */ + ctx->seen |= SEEN_XREG; + PPC_ADD(r_A, r_A, r_X); + break; + case BPF_S_ALU_ADD_K: /* A += K; */ + if (!K) + break; + PPC_ADDI(r_A, r_A, IMM_L(K)); + if (K >= 32768) + PPC_ADDIS(r_A, r_A, IMM_HA(K)); + break; + case BPF_S_ALU_SUB_X: /* A -= X; */ + ctx->seen |= SEEN_XREG; + PPC_SUB(r_A, r_A, r_X); + break; + case BPF_S_ALU_SUB_K: /* A -= K */ + if (!K) + break; + PPC_ADDI(r_A, r_A, IMM_L(-K)); + if (K >= 32768) + PPC_ADDIS(r_A, r_A, IMM_HA(-K)); + break; + case BPF_S_ALU_MUL_X: /* A *= X; */ + ctx->seen |= SEEN_XREG; + PPC_MUL(r_A, r_A, r_X); + break; + case BPF_S_ALU_MUL_K: /* A *= K */ + if (K < 32768) + PPC_MULI(r_A, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_MUL(r_A, r_A, r_scratch1); + } + break; + case BPF_S_ALU_DIV_X: /* A /= X; */ + ctx->seen |= SEEN_XREG; + PPC_CMPWI(r_X, 0); + if (ctx->pc_ret0 != -1) { + PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); + } else { + /* + * Exit, returning 0; first pass hits here + * (longer worst-case code size). 
+ */ + PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_LI(r_ret, 0); + PPC_JMP(exit_addr); + } + PPC_DIVWU(r_A, r_A, r_X); + break; + case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ + PPC_LI32(r_scratch1, K); + /* Top 32 bits of 64bit result -> A */ + PPC_MULHWU(r_A, r_A, r_scratch1); + break; + case BPF_S_ALU_AND_X: + ctx->seen |= SEEN_XREG; + PPC_AND(r_A, r_A, r_X); + break; + case BPF_S_ALU_AND_K: + if (!IMM_H(K)) + PPC_ANDI(r_A, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_AND(r_A, r_A, r_scratch1); + } + break; + case BPF_S_ALU_OR_X: + ctx->seen |= SEEN_XREG; + PPC_OR(r_A, r_A, r_X); + break; + case BPF_S_ALU_OR_K: + if (IMM_L(K)) + PPC_ORI(r_A, r_A, IMM_L(K)); + if (K >= 65536) + PPC_ORIS(r_A, r_A, IMM_H(K)); + break; + case BPF_S_ALU_LSH_X: /* A <<= X; */ + ctx->seen |= SEEN_XREG; + PPC_SLW(r_A, r_A, r_X); + break; + case BPF_S_ALU_LSH_K: + if (K == 0) + break; + else + PPC_SLWI(r_A, r_A, K); + break; + case BPF_S_ALU_RSH_X: /* A >>= X; */ + ctx->seen |= SEEN_XREG; + PPC_SRW(r_A, r_A, r_X); + break; + case BPF_S_ALU_RSH_K: /* A >>= K; */ + if (K == 0) + break; + else + PPC_SRWI(r_A, r_A, K); + break; + case BPF_S_ALU_NEG: + PPC_NEG(r_A, r_A); + break; + case BPF_S_RET_K: + PPC_LI32(r_ret, K); + if (!K) { + if (ctx->pc_ret0 == -1) + ctx->pc_ret0 = i; + } + /* + * If this isn't the very last instruction, branch to + * the epilogue if we've stuff to clean up. Otherwise, + * if there's nothing to tidy, just return. If we /are/ + * the last instruction, we're about to fall through to + * the epilogue to return. + */ + if (i != flen - 1) { + /* + * Note: 'seen' is properly valid only on pass + * #2. Both parts of this conditional are the + * same instruction size though, meaning the + * first pass will still correctly determine the + * code size/addresses. + */ + if (ctx->seen) + PPC_JMP(exit_addr); + else + PPC_BLR(); + } + break; + case BPF_S_RET_A: + PPC_MR(r_ret, r_A); + if (i != flen - 1) { + if (ctx->seen) + PPC_JMP(exit_addr); + else + PPC_BLR(); + } + break; + case BPF_S_MISC_TAX: /* X = A */ + PPC_MR(r_X, r_A); + break; + case BPF_S_MISC_TXA: /* A = X */ + ctx->seen |= SEEN_XREG; + PPC_MR(r_A, r_X); + break; + + /*** Constant loads/M[] access ***/ + case BPF_S_LD_IMM: /* A = K */ + PPC_LI32(r_A, K); + break; + case BPF_S_LDX_IMM: /* X = K */ + PPC_LI32(r_X, K); + break; + case BPF_S_LD_MEM: /* A = mem[K] */ + PPC_MR(r_A, r_M + (K & 0xf)); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_LDX_MEM: /* X = mem[K] */ + PPC_MR(r_X, r_M + (K & 0xf)); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_ST: /* mem[K] = A */ + PPC_MR(r_M + (K & 0xf), r_A); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_STX: /* mem[K] = X */ + PPC_MR(r_M + (K & 0xf), r_X); + ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_LD_W_LEN: /* A = skb->len; */ + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); + break; + case BPF_S_LDX_W_LEN: /* X = skb->len; */ + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); + break; + + /*** Ancillary info loads ***/ + + /* None of the BPF_S_ANC* codes appear to be passed by + * sk_chk_filter(). The interpreter and the x86 BPF + * compiler implement them so we do too -- they may be + * planted in future. 
+ */ + case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + protocol) != 2); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + protocol)); + /* ntohs is a NOP with BE loads. */ + break; + case BPF_S_ANC_IFINDEX: + PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, + dev)); + PPC_CMPDI(r_scratch1, 0); + if (ctx->pc_ret0 != -1) { + PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); + } else { + /* Exit, returning 0; first pass hits here. */ + PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_LI(r_ret, 0); + PPC_JMP(exit_addr); + } + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + ifindex) != 4); + PPC_LWZ_OFFS(r_A, r_scratch1, + offsetof(struct net_device, ifindex)); + break; + case BPF_S_ANC_MARK: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + mark)); + break; + case BPF_S_ANC_RXHASH: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + rxhash)); + break; + case BPF_S_ANC_QUEUE: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + queue_mapping) != 2); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + queue_mapping)); + break; + case BPF_S_ANC_CPU: +#ifdef CONFIG_SMP + /* + * PACA ptr is r13: + * raw_smp_processor_id() = local_paca->paca_index + */ + BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, + paca_index) != 2); + PPC_LHZ_OFFS(r_A, 13, + offsetof(struct paca_struct, paca_index)); +#else + PPC_LI(r_A, 0); +#endif + break; + + /*** Absolute loads from packet header/data ***/ + case BPF_S_LD_W_ABS: + func = sk_load_word; + goto common_load; + case BPF_S_LD_H_ABS: + func = sk_load_half; + goto common_load; + case BPF_S_LD_B_ABS: + func = sk_load_byte; + common_load: + /* + * Load from [K]. Reference with the (negative) + * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported. + */ + ctx->seen |= SEEN_DATAREF; + if ((int)K < 0) + return -ENOTSUPP; + PPC_LI64(r_scratch1, func); + PPC_MTLR(r_scratch1); + PPC_LI32(r_addr, K); + PPC_BLRL(); + /* + * Helper returns 'lt' condition on error, and an + * appropriate return value in r3 + */ + PPC_BCC(COND_LT, exit_addr); + break; + + /*** Indirect loads from packet header/data ***/ + case BPF_S_LD_W_IND: + func = sk_load_word; + goto common_load_ind; + case BPF_S_LD_H_IND: + func = sk_load_half; + goto common_load_ind; + case BPF_S_LD_B_IND: + func = sk_load_byte; + common_load_ind: + /* + * Load from [X + K]. Negative offsets are tested for + * in the helper functions, and result in a 'ret 0'. + */ + ctx->seen |= SEEN_DATAREF | SEEN_XREG; + PPC_LI64(r_scratch1, func); + PPC_MTLR(r_scratch1); + PPC_ADDI(r_addr, r_X, IMM_L(K)); + if (K >= 32768) + PPC_ADDIS(r_addr, r_addr, IMM_HA(K)); + PPC_BLRL(); + /* If error, cr0.LT set */ + PPC_BCC(COND_LT, exit_addr); + break; + + case BPF_S_LDX_B_MSH: + /* + * x86 version drops packet (RET 0) when K<0, whereas + * interpreter does allow K<0 (__load_pointer, special + * ancillary data). common_load returns ENOTSUPP if K<0, + * so we fall back to interpreter & filter works. 
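For reference, the BPF_S_LDX_B_MSH semantics that sk_load_byte_msh's rlwinm implements, written in plain C; X = 4 * (pkt[K] & 0xf) is the usual way a filter turns an IPv4 IHL nibble into a byte offset:

#include <stdint.h>

/* "rlwinm r_X, r_X, 2, 32-4-2, 31-2" rotates left by 2 and keeps a
 * 4-bit field in bit positions 2..5, i.e. (byte & 0xf) << 2. */
static uint32_t ldx_b_msh(const uint8_t *pkt, uint32_t k)
{
	return (uint32_t)(pkt[k] & 0xf) << 2;
}

The resulting X then feeds the indirect loads (BPF_S_LD_*_IND) as the variable part of the offset.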
+ */ + func = sk_load_byte_msh; + goto common_load; + break; + + /*** Jump and branches ***/ + case BPF_S_JMP_JA: + if (K != 0) + PPC_JMP(addrs[i + 1 + K]); + break; + + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGT_X: + true_cond = COND_GT; + goto cond_branch; + case BPF_S_JMP_JGE_K: + case BPF_S_JMP_JGE_X: + true_cond = COND_GE; + goto cond_branch; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JEQ_X: + true_cond = COND_EQ; + goto cond_branch; + case BPF_S_JMP_JSET_K: + case BPF_S_JMP_JSET_X: + true_cond = COND_NE; + /* Fall through */ + cond_branch: + /* same targets, can avoid doing the test :) */ + if (filter[i].jt == filter[i].jf) { + if (filter[i].jt > 0) + PPC_JMP(addrs[i + 1 + filter[i].jt]); + break; + } + + switch (filter[i].code) { + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JEQ_X: + ctx->seen |= SEEN_XREG; + PPC_CMPLW(r_A, r_X); + break; + case BPF_S_JMP_JSET_X: + ctx->seen |= SEEN_XREG; + PPC_AND_DOT(r_scratch1, r_A, r_X); + break; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGE_K: + if (K < 32768) + PPC_CMPLWI(r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_CMPLW(r_A, r_scratch1); + } + break; + case BPF_S_JMP_JSET_K: + if (K < 32768) + /* PPC_ANDI is /only/ dot-form */ + PPC_ANDI(r_scratch1, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_AND_DOT(r_scratch1, r_A, + r_scratch1); + } + break; + } + /* Sometimes branches are constructed "backward", with + * the false path being the branch and true path being + * a fallthrough to the next instruction. + */ + if (filter[i].jt == 0) + /* Swap the sense of the branch */ + PPC_BCC(true_cond ^ COND_CMP_TRUE, + addrs[i + 1 + filter[i].jf]); + else { + PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]); + if (filter[i].jf != 0) + PPC_JMP(addrs[i + 1 + filter[i].jf]); + } + break; + default: + /* The filter contains something cruel & unusual. + * We don't handle it, but also there shouldn't be + * anything missing from our list. + */ + if (printk_ratelimit()) + pr_err("BPF filter opcode %04x (@%d) unsupported\n", + filter[i].code, i); + return -ENOTSUPP; + } + + } + /* Set end-of-body-code address for exit. */ + addrs[i] = ctx->idx * 4; + + return 0; +} + +void bpf_jit_compile(struct sk_filter *fp) +{ + unsigned int proglen; + unsigned int alloclen; + u32 *image = NULL; + u32 *code_base; + unsigned int *addrs; + struct codegen_context cgctx; + int pass; + int flen = fp->len; + + if (!bpf_jit_enable) + return; + + addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); + if (addrs == NULL) + return; + + /* + * There are multiple assembly passes as the generated code will change + * size as it settles down, figuring out the max branch offsets/exit + * paths required. + * + * The range of standard conditional branches is +/- 32Kbytes. Since + * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to + * finish with 8 bytes/instruction. Not feasible, so long jumps are + * used, distinct from short branches. + * + * Current: + * + * For now, both branch types assemble to 2 words (short branches padded + * with a NOP); this is less efficient, but assembly will always complete + * after exactly 3 passes: + * + * First pass: No code buffer; Program is "faux-generated" -- no code + * emitted but maximum size of output determined (and addrs[] filled + * in). Also, we note whether we use M[], whether we use skb data, etc. + * All generation choices assumed to be 'worst-case', e.g. branches all + * far (2 instructions), return path code reduction not available, etc. 
+ * + * Second pass: Code buffer allocated with size determined previously. + * Prologue generated to support features we have seen used. Exit paths + * determined and addrs[] is filled in again, as code may be slightly + * smaller as a result. + * + * Third pass: Code generated 'for real', and branch destinations + * determined from now-accurate addrs[] map. + * + * Ideal: + * + * If we optimise this, near branches will be shorter. On the + * first assembly pass, we should err on the side of caution and + * generate the biggest code. On subsequent passes, branches will be + * generated short or long and code size will reduce. With smaller + * code, more branches may fall into the short category, and code will + * reduce more. + * + * Finally, if we see one pass generate code the same size as the + * previous pass we have converged and should now generate code for + * real. Allocating at the end will also save the memory that would + * otherwise be wasted by the (small) current code shrinkage. + * Preferably, we should do a small number of passes (e.g. 5) and if we + * haven't converged by then, get impatient and force code to generate + * as-is, even if the odd branch would be left long. The chances of a + * long jump are tiny with all but the most enormous of BPF filter + * inputs, so we should usually converge on the third pass. + */ + + cgctx.idx = 0; + cgctx.seen = 0; + cgctx.pc_ret0 = -1; + /* Scouting faux-generate pass 0 */ + if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) + /* We hit something illegal or unsupported. */ + goto out; + + /* + * Pretend to build prologue, given the features we've seen. This will + * update ctgtx.idx as it pretends to output instructions, then we can + * calculate total size from idx. + */ + bpf_jit_build_prologue(fp, 0, &cgctx); + bpf_jit_build_epilogue(0, &cgctx); + + proglen = cgctx.idx * 4; + alloclen = proglen + FUNCTION_DESCR_SIZE; + image = module_alloc(max_t(unsigned int, alloclen, + sizeof(struct work_struct))); + if (!image) + goto out; + + code_base = image + (FUNCTION_DESCR_SIZE/4); + + /* Code generation passes 1-2 */ + for (pass = 1; pass < 3; pass++) { + /* Now build the prologue, body code & epilogue for real. 
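The whole faux-generation scheme described here rests on PLANT_INSTR() writing only when a buffer exists while always advancing the index, so pass 0 can size the program against a NULL image. A stripped-down sketch of that measure-then-emit idiom (function and variable names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdlib.h>

/* Like PLANT_INSTR(): emit only if a buffer exists, always count. */
static void plant(uint32_t *buf, unsigned int *idx, uint32_t instr)
{
	if (buf)
		buf[*idx] = instr;
	(*idx)++;
}

static unsigned int gen(uint32_t *buf)
{
	unsigned int idx = 0;

	plant(buf, &idx, 0x60000000);	/* ori 0,0,0 (nop) */
	plant(buf, &idx, 0x4e800020);	/* blr */
	return idx;			/* instructions emitted/counted */
}

int main(void)
{
	unsigned int n = gen(NULL);	/* measuring pass: size only */
	uint32_t *img = malloc(n * sizeof(*img));

	if (img)
		gen(img);		/* emitting pass: code for real */
	free(img);
	return 0;
}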
*/ + cgctx.idx = 0; + bpf_jit_build_prologue(fp, code_base, &cgctx); + bpf_jit_build_body(fp, code_base, &cgctx, addrs); + bpf_jit_build_epilogue(code_base, &cgctx); + + if (bpf_jit_enable > 1) + pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass, + proglen - (cgctx.idx * 4), cgctx.seen); + } + + if (bpf_jit_enable > 1) + pr_info("flen=%d proglen=%u pass=%d image=%p\n", + flen, proglen, pass, image); + + if (image) { + if (bpf_jit_enable > 1) + print_hex_dump(KERN_ERR, "JIT code: ", + DUMP_PREFIX_ADDRESS, + 16, 1, code_base, + proglen, false); + + bpf_flush_icache(code_base, code_base + (proglen/4)); + /* Function descriptor nastiness: Address + TOC */ + ((u64 *)image)[0] = (u64)code_base; + ((u64 *)image)[1] = local_paca->kernel_toc; + fp->bpf_func = (void *)image; + } +out: + kfree(addrs); + return; +} + +static void jit_free_defer(struct work_struct *arg) +{ + module_free(NULL, arg); +} + +/* run from softirq, we must use a work_struct to call + * module_free() from process context + */ +void bpf_jit_free(struct sk_filter *fp) +{ + if (fp->bpf_func != sk_run_filter) { + struct work_struct *work = (struct work_struct *)fp->bpf_func; + + INIT_WORK(work, jit_free_defer); + schedule_work(work); + } +} diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 47ea1be1481..90f4496017e 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX It is safe to say 'Y' here -config PPC_MPC5200_GPIO - bool "MPC5200 GPIO support" - depends on PPC_MPC52xx - select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO - help - Enable gpiolib support for mpc5200 based boards - config PPC_MPC5200_LPBFIFO tristate "MPC5200 LocalPlus bus FIFO driver" depends on PPC_MPC52xx diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile index 2bc8cd0c5cf..4e62486791e 100644 --- a/arch/powerpc/platforms/52xx/Makefile +++ b/arch/powerpc/platforms/52xx/Makefile @@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y) obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o endif -obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c deleted file mode 100644 index 1757d1db4b5..00000000000 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - * MPC52xx gpio driver - * - * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
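- *
- * [Editor's aside, not part of this notice; it concerns bpf_jit_free()
- * earlier in this patch. The dead JIT image itself is recycled as the
- * work item, which is why bpf_jit_compile() pads its allocation to at
- * least sizeof(struct work_struct):
- *
- *	work = (struct work_struct *)fp->bpf_func;
- *	INIT_WORK(work, jit_free_defer);	overwrites dead code bytes
- *	schedule_work(work);			module_free() then runs in
- *						process context
- *
- * This restates the code quoted above; nothing here is new.]
- *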
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/of.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/of_gpio.h> -#include <linux/io.h> -#include <linux/of_platform.h> - -#include <asm/gpio.h> -#include <asm/mpc52xx.h> -#include <sysdev/fsl_soc.h> - -static DEFINE_SPINLOCK(gpio_lock); - -struct mpc52xx_gpiochip { - struct of_mm_gpio_chip mmchip; - unsigned int shadow_dvo; - unsigned int shadow_gpioe; - unsigned int shadow_ddr; -}; - -/* - * GPIO LIB API implementation for wakeup GPIOs. - * - * There's a maximum of 8 wakeup GPIOs. Which of these are available - * for use depends on your board setup. - * - * 0 -> GPIO_WKUP_7 - * 1 -> GPIO_WKUP_6 - * 2 -> PSC6_1 - * 3 -> PSC6_0 - * 4 -> ETH_17 - * 5 -> PSC3_9 - * 6 -> PSC2_4 - * 7 -> PSC1_4 - * - */ -static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - unsigned int ret; - - ret = (in_8(®s->wkup_ival) >> (7 - gpio)) & 1; - - pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret); - - return ret; -} - -static inline void -__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - - if (val) - chip->shadow_dvo |= 1 << (7 - gpio); - else - chip->shadow_dvo &= ~(1 << (7 - gpio)); - - out_8(®s->wkup_dvo, chip->shadow_dvo); -} - -static void -mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_wkup_gpio_set(gc, gpio, val); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); -} - -static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* set the direction */ - chip->shadow_ddr &= ~(1 << (7 - gpio)); - out_8(®s->wkup_ddr, chip->shadow_ddr); - - /* and enable the pin */ - chip->shadow_gpioe |= 1 << (7 - gpio); - out_8(®s->wkup_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -static int -mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_wkup_gpio_set(gc, gpio, val); - - /* Then set direction */ - chip->shadow_ddr |= 1 << (7 - gpio); - out_8(®s->wkup_ddr, chip->shadow_ddr); - - /* Finally enable the pin */ - chip->shadow_gpioe |= 1 << (7 - gpio); - out_8(®s->wkup_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); - - return 0; -} - -static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device 
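- /*
- * [Editor's note on the wakeup GPIO accessors above; illustrative and
- * not from the original driver. The MPC52xx numbers bits big-endian,
- * so wakeup gpio N lives at bit (7 - N) of the 8-bit wkup registers;
- * reading gpio 2 (PSC6_1), for example, tests bit 5:
- *
- *	val = (in_8(&regs->wkup_ival) >> (7 - 2)) & 1;
- *
- * Writes go through the shadow_* copies under gpio_lock, presumably so
- * that read-modify-write cycles never depend on reading the hardware
- * register back.]
- */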
*ofdev) -{ - struct mpc52xx_gpiochip *chip; - struct mpc52xx_gpio_wkup __iomem *regs; - struct gpio_chip *gc; - int ret; - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - gc = &chip->mmchip.gc; - - gc->ngpio = 8; - gc->direction_input = mpc52xx_wkup_gpio_dir_in; - gc->direction_output = mpc52xx_wkup_gpio_dir_out; - gc->get = mpc52xx_wkup_gpio_get; - gc->set = mpc52xx_wkup_gpio_set; - - ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); - if (ret) - return ret; - - regs = chip->mmchip.regs; - chip->shadow_gpioe = in_8(®s->wkup_gpioe); - chip->shadow_ddr = in_8(®s->wkup_ddr); - chip->shadow_dvo = in_8(®s->wkup_dvo); - - return 0; -} - -static int mpc52xx_gpiochip_remove(struct platform_device *ofdev) -{ - return -EBUSY; -} - -static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { - { - .compatible = "fsl,mpc5200-gpio-wkup", - }, - {} -}; - -static struct platform_driver mpc52xx_wkup_gpiochip_driver = { - .driver = { - .name = "gpio_wkup", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_wkup_gpiochip_match, - }, - .probe = mpc52xx_wkup_gpiochip_probe, - .remove = mpc52xx_gpiochip_remove, -}; - -/* - * GPIO LIB API implementation for simple GPIOs - * - * There's a maximum of 32 simple GPIOs. Which of these are available - * for use depends on your board setup. - * The numbering reflects the bit numbering in the port registers: - * - * 0..1 > reserved - * 2..3 > IRDA - * 4..7 > ETHR - * 8..11 > reserved - * 12..15 > USB - * 16..17 > reserved - * 18..23 > PSC3 - * 24..27 > PSC2 - * 28..31 > PSC1 - */ -static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - unsigned int ret; - - ret = (in_be32(®s->simple_ival) >> (31 - gpio)) & 1; - - return ret; -} - -static inline void -__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - - if (val) - chip->shadow_dvo |= 1 << (31 - gpio); - else - chip->shadow_dvo &= ~(1 << (31 - gpio)); - out_be32(®s->simple_dvo, chip->shadow_dvo); -} - -static void -mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_simple_gpio_set(gc, gpio, val); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); -} - -static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* set the direction */ - chip->shadow_ddr &= ~(1 << (31 - gpio)); - out_be32(®s->simple_ddr, chip->shadow_ddr); - - /* and enable the pin */ - chip->shadow_gpioe |= 1 << (31 - gpio); - out_be32(®s->simple_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -static int -mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio 
__iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* First set initial value */ - __mpc52xx_simple_gpio_set(gc, gpio, val); - - /* Then set direction */ - chip->shadow_ddr |= 1 << (31 - gpio); - out_be32(®s->simple_ddr, chip->shadow_ddr); - - /* Finally enable the pin */ - chip->shadow_gpioe |= 1 << (31 - gpio); - out_be32(®s->simple_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); - - return 0; -} - -static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) -{ - struct mpc52xx_gpiochip *chip; - struct gpio_chip *gc; - struct mpc52xx_gpio __iomem *regs; - int ret; - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - gc = &chip->mmchip.gc; - - gc->ngpio = 32; - gc->direction_input = mpc52xx_simple_gpio_dir_in; - gc->direction_output = mpc52xx_simple_gpio_dir_out; - gc->get = mpc52xx_simple_gpio_get; - gc->set = mpc52xx_simple_gpio_set; - - ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); - if (ret) - return ret; - - regs = chip->mmchip.regs; - chip->shadow_gpioe = in_be32(®s->simple_gpioe); - chip->shadow_ddr = in_be32(®s->simple_ddr); - chip->shadow_dvo = in_be32(®s->simple_dvo); - - return 0; -} - -static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { - { - .compatible = "fsl,mpc5200-gpio", - }, - {} -}; - -static struct platform_driver mpc52xx_simple_gpiochip_driver = { - .driver = { - .name = "gpio", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_simple_gpiochip_match, - }, - .probe = mpc52xx_simple_gpiochip_probe, - .remove = mpc52xx_gpiochip_remove, -}; - -static int __init mpc52xx_gpio_init(void) -{ - if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) - printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); - - if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) - printk(KERN_ERR "Unable to register simple GPIO driver\n"); - - return 0; -} - - -/* Make sure we get initialised before anyone else tries to use us */ -subsys_initcall(mpc52xx_gpio_init); - -/* No exit call at the moment as we cannot unregister of gpio chips */ - -MODULE_DESCRIPTION("Freescale MPC52xx gpio driver"); -MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index f33e08d573c..abe8d7e2ebe 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/irq.h> +#include <linux/of_pci.h> #include <asm/sections.h> #include <asm/io.h> @@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) if (offset >= 0x100) return PCIBIOS_BAD_REGISTER_NUMBER; - np = pci_busdev_to_OF_node(bus, devfn); + np = of_pci_find_child_device(bus->dev.of_node, devfn); if (np == NULL) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a65d2e82f61..a63d34c3611 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs) { if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); } @@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, if 
(get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); else signal = SIGILL; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fe103e891e7..095f782a551 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access, goto out; address = trans_exc_code & __FAIL_ADDR_MASK; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; @@ -345,11 +345,11 @@ retry: if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c index 748955df018..fa4f724b295 100644 --- a/arch/sh/kernel/cpu/sh4/perf_event.c +++ b/arch/sh/kernel/cpu/sh4/perf_event.c @@ -180,6 +180,21 @@ static const int sh7750_cache_events [ C(RESULT_MISS) ] = -1, }, }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; static int sh7750_event_map(int event) diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c index 17e6bebfede..84a2c396cee 100644 --- a/arch/sh/kernel/cpu/sh4a/perf_event.c +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c @@ -205,6 +205,21 @@ static const int sh4a_cache_events [ C(RESULT_MISS) ] = -1, }, }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; static int sh4a_event_map(int event) diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 64c807c3920..bf280c812d2 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -256,7 +256,7 @@ out: return ret; } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = default_platform_runtime_suspend, .runtime_resume = default_platform_runtime_resume, @@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, hwblk_disable(hwblk_info, hwblk); /* make sure driver re-inits itself once */ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); - dev->pwr_domain = &default_power_domain; + dev->pm_domain = &default_pm_domain; break; /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ case BUS_NOTIFY_BOUND_DRIVER: @@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); break; case BUS_NOTIFY_DEL_DEVICE: - dev->pwr_domain = NULL; + dev->pm_domain = NULL; break; } 
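	/*
	 * [Editor's note on the recurring perf hunks in this patch; this is
	 * an inference from the diffs themselves, not from a changelog: the
	 * explicit nmi flag was dropped from the perf APIs, so every call
	 * site loses one argument, e.g.
	 *
	 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	 * becomes
	 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	 *
	 * and perf_event_overflow(event, 1, &data, regs) likewise becomes
	 * perf_event_overflow(event, &data, regs).]
	 */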
return 0; diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 3d7b209b217..92b3c276339 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset, return 0; } -void ptrace_triggered(struct perf_event *bp, int nmi, +void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; @@ -91,7 +91,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) attr.bp_len = HW_BREAKPOINT_LEN_2; attr.bp_type = HW_BREAKPOINT_R; - bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); if (IS_ERR(bp)) return PTR_ERR(bp); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index b51a17104b5..d9006f8ffc1 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, */ if (!expected) { unaligned_fixups_notify(current, instruction, regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); } diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 6713ca97e55..67110be83fd 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index f76a5090d5d..97719521065 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) struct task_struct *tsk = current; struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { /* initialize once. 
*/ diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index d4c34d757f0..7bebd044f2a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if ((regs->sr & SR_IMASK) != SR_IMASK) local_irq_enable(); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -210,11 +210,11 @@ good_area: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 7f5810f5dfd..e3430e093d4 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, /* Not an IO address, so reenable interrupts */ local_irq_enable(); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt or have no user @@ -200,11 +200,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 862e3ce92b1..02939abd356 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -42,9 +42,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #endif /* __KERNEL__ */ #ifndef CONFIG_LEON_PCI diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 948b686ec08..2614d96141c 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return PCI_IRQ_NONE; } -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #define HAVE_ARCH_PCI_RESOURCE_TO_USER extern void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index c7ad3fe2b25..b928b31424b 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \ } while (0) #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) #define instruction_pointer(regs) ((regs)->tpc) +#define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) #define regs_return_value(regs) ((regs)->u_regs[UREG_I0]) #ifdef CONFIG_SMP diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 713dc91020a..80a87e2a3e7 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = 
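 /*
 * [Editor's note on the hunk just below, reasoning from the visible
 * change only: of_node_get(node) takes a reference on the device-tree
 * node before caching it in dev.of_node, so the pci_dev now pins the
 * node instead of borrowing it; presumably a matching of_node_put()
 * balances this when the device is released.]
 */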
&pci_bus_type; - dev->dev.of_node = node; + dev->dev.of_node = of_node_get(node); dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? */ set_pcie_port_type(dev); @@ -1021,12 +1021,6 @@ void arch_teardown_msi_irq(unsigned int irq) } #endif /* !(CONFIG_PCI_MSI) */ -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev->dev.of_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 948601a066f..a19f0419547 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource) return -ENXIO; } -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - struct pcidev_cookie *pc = pdev->sysdata; - - return pc->prom_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - /* * This probably belongs here rather than ioport.c because * we do not want this crud linked into SBus kernels. diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2cb0e1c001e..62a034318b1 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu ultra3_pmu = { @@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara1_pmu = { @@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara2_pmu = { @@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 4491f4cb269..7efbb2f9e77 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned 
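/*
 * [Editor's note on the C(NODE) table additions above, inferred from
 * the pattern of the hunks across sh and sparc: a new generic perf
 * cache type for node (local-memory) accesses was added, so every
 * architecture's cache map grows a matching row; PMUs that cannot
 * count such events mark every op/result pair CACHE_OP_UNSUPPORTED
 * (or -1), exactly as these tables do.]
 */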
long addr = compute_effective_address(regs, insn); int err; - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index b2b019ea8ca..35cff1673aa 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? FPRS_DL : FPRS_DU; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; @@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); @@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 36357717d69..32b626c9d81 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; diff --git a/arch/sparc/math-emu/math_32.c 
b/arch/sparc/math-emu/math_32.c index a3fccde894e..aa4d55b0bdf 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) int retcode = 0; /* assume all succeed */ unsigned long insn; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); #ifdef DEBUG_MATHEMU printk("In do_mathemu()... pc is %08lx\n", regs->pc); diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 56d2c44747b..e575bd2fe38 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7543ddbdadb..aa1c1b1ce5c 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, if (in_atomic() || !mm) goto no_context; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); down_read(&mm->mmap_sem); @@ -301,12 +301,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); return; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index f92ce56a8b2..504c0622f72 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) if (in_atomic() || !mm) goto intr_or_no_mm; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (!down_read_trylock(&mm->mmap_sem)) { if ((regs->tstate & TSTATE_PRIV) && @@ -433,12 +433,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 37357a599dc..7d45601b27e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -680,33 +680,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT Calgary anyway, pass 'iommu=calgary' on the kernel command line. If unsure, say Y. -config AMD_IOMMU - bool "AMD IOMMU support" - select SWIOTLB - select PCI_MSI - select PCI_IOV - depends on X86_64 && PCI && ACPI - ---help--- - With this option you can enable support for AMD IOMMU hardware in - your system. An IOMMU is a hardware component which provides - remapping of DMA memory accesses from devices. 
With an AMD IOMMU you - can isolate the the DMA memory of different devices and protect the - system from misbehaving device drivers or hardware. - - You can find out if your system has an AMD IOMMU if you look into - your BIOS for an option to enable it or if you have an IVRS ACPI - table. - -config AMD_IOMMU_STATS - bool "Export AMD IOMMU statistics to debugfs" - depends on AMD_IOMMU - select DEBUG_FS - ---help--- - This option enables code in the AMD IOMMU driver to collect various - statistics about whats happening in the driver and exports that - information to userspace via debugfs. - If unsure, say N. - # need this always selected by IOMMU for the VIA workaround config SWIOTLB def_bool y if X86_64 @@ -720,9 +693,6 @@ config SWIOTLB config IOMMU_HELPER def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) -config IOMMU_API - def_bool (AMD_IOMMU || DMAR) - config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL @@ -1942,55 +1912,6 @@ config PCI_CNB20LE_QUIRK You should say N unless you know you need this. -config DMAR - bool "Support for DMA Remapping Devices (EXPERIMENTAL)" - depends on PCI_MSI && ACPI && EXPERIMENTAL - help - DMA remapping (DMAR) devices support enables independent address - translations for Direct Memory Access (DMA) from devices. - These DMA remapping devices are reported via ACPI tables - and include PCI device scope covered by these DMA - remapping devices. - -config DMAR_DEFAULT_ON - def_bool y - prompt "Enable DMA Remapping Devices by default" - depends on DMAR - help - Selecting this option will enable a DMAR device at boot time if - one is found. If this option is not selected, DMAR support can - be enabled by passing intel_iommu=on to the kernel. It is - recommended you say N here while the DMAR code remains - experimental. - -config DMAR_BROKEN_GFX_WA - bool "Workaround broken graphics drivers (going away soon)" - depends on DMAR && BROKEN - ---help--- - Current Graphics drivers tend to use physical address - for DMA and avoid using DMA APIs. Setting this config - option permits the IOMMU driver to set a unity map for - all the OS-visible memory. Hence the driver can continue - to use physical addresses for DMA, at least until this - option is removed in the 2.6.32 kernel. - -config DMAR_FLOPPY_WA - def_bool y - depends on DMAR - ---help--- - Floppy disk drivers are known to bypass DMA API calls - thereby failing to work when IOMMU is enabled. This - workaround will setup a 1:1 mapping for the first - 16MiB to make floppy (an ISA device) work. - -config INTR_REMAP - bool "Support for Interrupt Remapping (EXPERIMENTAL)" - depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL - ---help--- - Supports Interrupt remapping for IO-APIC and MSI devices. - To use x2apic mode in the CPU's which support x2APIC enhancements or - to support platforms with CPU's having > 8 bit APIC ID, say Y. - source "drivers/pci/pcie/Kconfig" source "drivers/pci/Kconfig" diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h deleted file mode 100644 index a6863a2dec1..00000000000 --- a/arch/x86/include/asm/amd_iommu.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 
- * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_H -#define _ASM_X86_AMD_IOMMU_H - -#include <linux/irqreturn.h> - -#ifdef CONFIG_AMD_IOMMU - -extern int amd_iommu_detect(void); - -#else - -static inline int amd_iommu_detect(void) { return -ENODEV; } - -#endif - -#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h deleted file mode 100644 index 55d95eb789b..00000000000 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_PROTO_H -#define _ASM_X86_AMD_IOMMU_PROTO_H - -#include <asm/amd_iommu_types.h> - -extern int amd_iommu_init_dma_ops(void); -extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); -extern irqreturn_t amd_iommu_int_handler(int irq, void *data); -extern void amd_iommu_apply_erratum_63(u16 devid); -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); -extern void amd_iommu_init_api(void); -#ifndef CONFIG_AMD_IOMMU_STATS - -static inline void amd_iommu_stats_init(void) { } - -#endif /* !CONFIG_AMD_IOMMU_STATS */ - -static inline bool is_rd890_iommu(struct pci_dev *pdev) -{ - return (pdev->vendor == PCI_VENDOR_ID_ATI) && - (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); -} - -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - -#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h deleted file mode 100644 index 4c998299541..00000000000 --- a/arch/x86/include/asm/amd_iommu_types.h +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 
- * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_TYPES_H -#define _ASM_X86_AMD_IOMMU_TYPES_H - -#include <linux/types.h> -#include <linux/mutex.h> -#include <linux/list.h> -#include <linux/spinlock.h> - -/* - * Maximum number of IOMMUs supported - */ -#define MAX_IOMMUS 32 - -/* - * some size calculation constants - */ -#define DEV_TABLE_ENTRY_SIZE 32 -#define ALIAS_TABLE_ENTRY_SIZE 2 -#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) - -/* Length of the MMIO region for the AMD IOMMU */ -#define MMIO_REGION_LENGTH 0x4000 - -/* Capability offsets used by the driver */ -#define MMIO_CAP_HDR_OFFSET 0x00 -#define MMIO_RANGE_OFFSET 0x0c -#define MMIO_MISC_OFFSET 0x10 - -/* Masks, shifts and macros to parse the device range capability */ -#define MMIO_RANGE_LD_MASK 0xff000000 -#define MMIO_RANGE_FD_MASK 0x00ff0000 -#define MMIO_RANGE_BUS_MASK 0x0000ff00 -#define MMIO_RANGE_LD_SHIFT 24 -#define MMIO_RANGE_FD_SHIFT 16 -#define MMIO_RANGE_BUS_SHIFT 8 -#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT) -#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT) -#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT) -#define MMIO_MSI_NUM(x) ((x) & 0x1f) - -/* Flag masks for the AMD IOMMU exclusion range */ -#define MMIO_EXCL_ENABLE_MASK 0x01ULL -#define MMIO_EXCL_ALLOW_MASK 0x02ULL - -/* Used offsets into the MMIO space */ -#define MMIO_DEV_TABLE_OFFSET 0x0000 -#define MMIO_CMD_BUF_OFFSET 0x0008 -#define MMIO_EVT_BUF_OFFSET 0x0010 -#define MMIO_CONTROL_OFFSET 0x0018 -#define MMIO_EXCL_BASE_OFFSET 0x0020 -#define MMIO_EXCL_LIMIT_OFFSET 0x0028 -#define MMIO_EXT_FEATURES 0x0030 -#define MMIO_CMD_HEAD_OFFSET 0x2000 -#define MMIO_CMD_TAIL_OFFSET 0x2008 -#define MMIO_EVT_HEAD_OFFSET 0x2010 -#define MMIO_EVT_TAIL_OFFSET 0x2018 -#define MMIO_STATUS_OFFSET 0x2020 - - -/* Extended Feature Bits */ -#define FEATURE_PREFETCH (1ULL<<0) -#define FEATURE_PPR (1ULL<<1) -#define FEATURE_X2APIC (1ULL<<2) -#define FEATURE_NX (1ULL<<3) -#define FEATURE_GT (1ULL<<4) -#define FEATURE_IA (1ULL<<6) -#define FEATURE_GA (1ULL<<7) -#define FEATURE_HE (1ULL<<8) -#define FEATURE_PC (1ULL<<9) - -/* MMIO status bits */ -#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 - -/* event logging constants */ -#define EVENT_ENTRY_SIZE 0x10 -#define EVENT_TYPE_SHIFT 28 -#define EVENT_TYPE_MASK 0xf -#define EVENT_TYPE_ILL_DEV 0x1 -#define EVENT_TYPE_IO_FAULT 0x2 -#define EVENT_TYPE_DEV_TAB_ERR 0x3 -#define EVENT_TYPE_PAGE_TAB_ERR 0x4 -#define EVENT_TYPE_ILL_CMD 0x5 -#define EVENT_TYPE_CMD_HARD_ERR 0x6 -#define EVENT_TYPE_IOTLB_INV_TO 0x7 -#define EVENT_TYPE_INV_DEV_REQ 0x8 -#define EVENT_DEVID_MASK 0xffff -#define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 -#define EVENT_FLAGS_MASK 0xfff -#define EVENT_FLAGS_SHIFT 0x10 - -/* feature 
control bits */ -#define CONTROL_IOMMU_EN 0x00ULL -#define CONTROL_HT_TUN_EN 0x01ULL -#define CONTROL_EVT_LOG_EN 0x02ULL -#define CONTROL_EVT_INT_EN 0x03ULL -#define CONTROL_COMWAIT_EN 0x04ULL -#define CONTROL_PASSPW_EN 0x08ULL -#define CONTROL_RESPASSPW_EN 0x09ULL -#define CONTROL_COHERENT_EN 0x0aULL -#define CONTROL_ISOC_EN 0x0bULL -#define CONTROL_CMDBUF_EN 0x0cULL -#define CONTROL_PPFLOG_EN 0x0dULL -#define CONTROL_PPFINT_EN 0x0eULL - -/* command specific defines */ -#define CMD_COMPL_WAIT 0x01 -#define CMD_INV_DEV_ENTRY 0x02 -#define CMD_INV_IOMMU_PAGES 0x03 -#define CMD_INV_IOTLB_PAGES 0x04 -#define CMD_INV_ALL 0x08 - -#define CMD_COMPL_WAIT_STORE_MASK 0x01 -#define CMD_COMPL_WAIT_INT_MASK 0x02 -#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 -#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 - -#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL - -/* macros and definitions for device table entries */ -#define DEV_ENTRY_VALID 0x00 -#define DEV_ENTRY_TRANSLATION 0x01 -#define DEV_ENTRY_IR 0x3d -#define DEV_ENTRY_IW 0x3e -#define DEV_ENTRY_NO_PAGE_FAULT 0x62 -#define DEV_ENTRY_EX 0x67 -#define DEV_ENTRY_SYSMGT1 0x68 -#define DEV_ENTRY_SYSMGT2 0x69 -#define DEV_ENTRY_INIT_PASS 0xb8 -#define DEV_ENTRY_EINT_PASS 0xb9 -#define DEV_ENTRY_NMI_PASS 0xba -#define DEV_ENTRY_LINT0_PASS 0xbe -#define DEV_ENTRY_LINT1_PASS 0xbf -#define DEV_ENTRY_MODE_MASK 0x07 -#define DEV_ENTRY_MODE_SHIFT 0x09 - -/* constants to configure the command buffer */ -#define CMD_BUFFER_SIZE 8192 -#define CMD_BUFFER_UNINITIALIZED 1 -#define CMD_BUFFER_ENTRIES 512 -#define MMIO_CMD_SIZE_SHIFT 56 -#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) - -/* constants for event buffer handling */ -#define EVT_BUFFER_SIZE 8192 /* 512 entries */ -#define EVT_LEN_MASK (0x9ULL << 56) - -#define PAGE_MODE_NONE 0x00 -#define PAGE_MODE_1_LEVEL 0x01 -#define PAGE_MODE_2_LEVEL 0x02 -#define PAGE_MODE_3_LEVEL 0x03 -#define PAGE_MODE_4_LEVEL 0x04 -#define PAGE_MODE_5_LEVEL 0x05 -#define PAGE_MODE_6_LEVEL 0x06 - -#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) -#define PM_LEVEL_SIZE(x) (((x) < 6) ? 
\ - ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ - (0xffffffffffffffffULL)) -#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) -#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) -#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ - IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) -#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) - -#define PM_MAP_4k 0 -#define PM_ADDR_MASK 0x000ffffffffff000ULL -#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ - (~((1ULL << (12 + ((lvl) * 9))) - 1))) -#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) - -/* - * Returns the page table level to use for a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_LEVEL(pagesize) \ - ((__ffs(pagesize) - 12) / 9) -/* - * Returns the number of ptes to use for a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_PTE_COUNT(pagesize) \ - (1ULL << ((__ffs(pagesize) - 12) % 9)) - -/* - * Aligns a given io-virtual address to a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_ALIGN(address, pagesize) \ - ((address) & ~((pagesize) - 1)) -/* - * Creates an IOMMU PTE for an address an a given pagesize - * The PTE has no permission bits set - * Pagesize is expected to be a power-of-two larger than 4096 - */ -#define PAGE_SIZE_PTE(address, pagesize) \ - (((address) | ((pagesize) - 1)) & \ - (~(pagesize >> 1)) & PM_ADDR_MASK) - -/* - * Takes a PTE value with mode=0x07 and returns the page size it maps - */ -#define PTE_PAGE_SIZE(pte) \ - (1ULL << (1 + ffz(((pte) | 0xfffULL)))) - -#define IOMMU_PTE_P (1ULL << 0) -#define IOMMU_PTE_TV (1ULL << 1) -#define IOMMU_PTE_U (1ULL << 59) -#define IOMMU_PTE_FC (1ULL << 60) -#define IOMMU_PTE_IR (1ULL << 61) -#define IOMMU_PTE_IW (1ULL << 62) - -#define DTE_FLAG_IOTLB 0x01 - -#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) -#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) -#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) -#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) - -#define IOMMU_PROT_MASK 0x03 -#define IOMMU_PROT_IR 0x01 -#define IOMMU_PROT_IW 0x02 - -/* IOMMU capabilities */ -#define IOMMU_CAP_IOTLB 24 -#define IOMMU_CAP_NPCACHE 26 -#define IOMMU_CAP_EFR 27 - -#define MAX_DOMAIN_ID 65536 - -/* FIXME: move this macro to <linux/pci.h> */ -#define PCI_BUS(x) (((x) >> 8) & 0xff) - -/* Protection domain flags */ -#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ -#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops - domain for an IOMMU */ -#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page - translation */ - -extern bool amd_iommu_dump; -#define DUMP_printk(format, arg...) 
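/*
 * [Editor's worked example for the page-size macros above, derived from
 * their definitions rather than the original sources: for a 32KiB
 * mapping, __ffs(0x8000) = 15, so
 *
 *	PAGE_SIZE_LEVEL(0x8000)     = (15 - 12) / 9        = 0
 *	PAGE_SIZE_PTE_COUNT(0x8000) = 1 << ((15 - 12) % 9) = 8
 *
 * i.e. eight consecutive level-0 PTEs cover the range, while a 2MiB
 * mapping (__ffs = 21) lands at level 1 with a single PTE.]
 */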
\ - do { \ - if (amd_iommu_dump) \ - printk(KERN_INFO "AMD-Vi: " format, ## arg); \ - } while(0); - -/* global flag if IOMMUs cache non-present entries */ -extern bool amd_iommu_np_cache; -/* Only true if all IOMMUs support device IOTLBs */ -extern bool amd_iommu_iotlb_sup; - -/* - * Make iterating over all IOMMUs easier - */ -#define for_each_iommu(iommu) \ - list_for_each_entry((iommu), &amd_iommu_list, list) -#define for_each_iommu_safe(iommu, next) \ - list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) - -#define APERTURE_RANGE_SHIFT 27 /* 128 MB */ -#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT) -#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT) -#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */ -#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) -#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) - -/* - * This structure contains generic data for IOMMU protection domains - * independent of their use. - */ -struct protection_domain { - struct list_head list; /* for list of all protection domains */ - struct list_head dev_list; /* List of all devices in this domain */ - spinlock_t lock; /* mostly used to lock the page table*/ - struct mutex api_lock; /* protect page tables in the iommu-api path */ - u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ - unsigned long flags; /* flags to find out type of domain */ - bool updated; /* complete domain flush required */ - unsigned dev_cnt; /* devices assigned to this domain */ - unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ - void *priv; /* private data */ - -}; - -/* - * This struct contains device specific data for the IOMMU - */ -struct iommu_dev_data { - struct list_head list; /* For domain->dev_list */ - struct device *dev; /* Device this data belong to */ - struct device *alias; /* The Alias Device */ - struct protection_domain *domain; /* Domain the device is bound to */ - atomic_t bind; /* Domain attach reverent count */ -}; - -/* - * For dynamic growth the aperture size is split into ranges of 128MB of - * DMA address space each. This struct represents one such range. - */ -struct aperture_range { - - /* address allocation bitmap */ - unsigned long *bitmap; - - /* - * Array of PTE pages for the aperture. In this array we save all the - * leaf pages of the domain page table used for the aperture. This way - * we don't need to walk the page table to find a specific PTE. We can - * just calculate its address in constant time. - */ - u64 *pte_pages[64]; - - unsigned long offset; -}; - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - struct list_head list; - - /* generic protection domain information */ - struct protection_domain domain; - - /* size of the aperture for the mappings */ - unsigned long aperture_size; - - /* address we start to search for free addresses */ - unsigned long next_address; - - /* address space relevant data */ - struct aperture_range *aperture[APERTURE_MAX_RANGES]; - - /* This will be set to true when TLB needs to be flushed */ - bool need_flush; - - /* - * if this is a preallocated domain, keep the device for which it was - * preallocated in this variable - */ - u16 target_dev; -}; - -/* - * Structure where we save information about one hardware AMD IOMMU in the - * system. 
- */ -struct amd_iommu { - struct list_head list; - - /* Index within the IOMMU array */ - int index; - - /* locks the accesses to the hardware */ - spinlock_t lock; - - /* Pointer to PCI device of this IOMMU */ - struct pci_dev *dev; - - /* physical address of MMIO space */ - u64 mmio_phys; - /* virtual address of MMIO space */ - u8 *mmio_base; - - /* capabilities of that IOMMU read from ACPI */ - u32 cap; - - /* flags read from acpi table */ - u8 acpi_flags; - - /* Extended features */ - u64 features; - - /* - * Capability pointer. There could be more than one IOMMU per PCI - * device function if there are more than one AMD IOMMU capability - * pointers. - */ - u16 cap_ptr; - - /* pci domain of this IOMMU */ - u16 pci_seg; - - /* first device this IOMMU handles. read from PCI */ - u16 first_device; - /* last device this IOMMU handles. read from PCI */ - u16 last_device; - - /* start of exclusion range of that IOMMU */ - u64 exclusion_start; - /* length of exclusion range of that IOMMU */ - u64 exclusion_length; - - /* command buffer virtual address */ - u8 *cmd_buf; - /* size of command buffer */ - u32 cmd_buf_size; - - /* size of event buffer */ - u32 evt_buf_size; - /* event buffer virtual address */ - u8 *evt_buf; - /* MSI number for event interrupt */ - u16 evt_msi_num; - - /* true if interrupts for this IOMMU are already enabled */ - bool int_enabled; - - /* if one, we need to send a completion wait command */ - bool need_sync; - - /* default dma_ops domain for that IOMMU */ - struct dma_ops_domain *default_dom; - - /* - * We can't rely on the BIOS to restore all values on reinit, so we - * need to stash them - */ - - /* The iommu BAR */ - u32 stored_addr_lo; - u32 stored_addr_hi; - - /* - * Each iommu has 6 l1s, each of which is documented as having 0x12 - * registers - */ - u32 stored_l1[6][0x12]; - - /* The l2 indirect registers */ - u32 stored_l2[0x83]; -}; - -/* - * List with all IOMMUs in the system. This list is not locked because it is - * only written and read at driver initialization or suspend time - */ -extern struct list_head amd_iommu_list; - -/* - * Array with pointers to each IOMMU struct - * The indices are referenced in the protection domains - */ -extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; - -/* Number of IOMMUs present in the system */ -extern int amd_iommus_present; - -/* - * Declarations for the global list of all protection domains - */ -extern spinlock_t amd_iommu_pd_lock; -extern struct list_head amd_iommu_pd_list; - -/* - * Structure defining one entry in the device table - */ -struct dev_table_entry { - u32 data[8]; -}; - -/* - * One entry for unity mappings parsed out of the ACPI table. - */ -struct unity_map_entry { - struct list_head list; - - /* starting device id this entry is used for (including) */ - u16 devid_start; - /* end device id this entry is used for (including) */ - u16 devid_end; - - /* start address to unity map (including) */ - u64 address_start; - /* end address to unity map (including) */ - u64 address_end; - - /* required protection */ - int prot; -}; - -/* - * List of all unity mappings. It is not locked because as runtime it is only - * read. It is created at ACPI table parsing time. - */ -extern struct list_head amd_iommu_unity_map; - -/* - * Data structures for device handling - */ - -/* - * Device table used by hardware. Read and write accesses by software are - * locked with the amd_iommu_pd_table lock. - */ -extern struct dev_table_entry *amd_iommu_dev_table; - -/* - * Alias table to find requestor ids to device ids. 
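- * (Editor's gloss, hedged: some functions issue DMA with a requestor
- * id other than their own, bridges being the usual culprits, and this
- * table maps such ids onto the device id whose table entry applies.)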
Not locked because only - * read on runtime. - */ -extern u16 *amd_iommu_alias_table; - -/* - * Reverse lookup table to find the IOMMU which translates a specific device. - */ -extern struct amd_iommu **amd_iommu_rlookup_table; - -/* size of the dma_ops aperture as power of 2 */ -extern unsigned amd_iommu_aperture_order; - -/* largest PCI device id we expect translation requests for */ -extern u16 amd_iommu_last_bdf; - -/* allocation bitmap for domain ids */ -extern unsigned long *amd_iommu_pd_alloc_bitmap; - -/* - * If true, the addresses will be flushed on unmap time, not when - * they are reused - */ -extern bool amd_iommu_unmap_flush; - -/* takes bus and device/function and returns the device id - * FIXME: should that be in generic PCI code? */ -static inline u16 calc_devid(u8 bus, u8 devfn) -{ - return (((u16)bus) << 8) | devfn; -} - -#ifdef CONFIG_AMD_IOMMU_STATS - -struct __iommu_counter { - char *name; - struct dentry *dent; - u64 value; -}; - -#define DECLARE_STATS_COUNTER(nm) \ - static struct __iommu_counter nm = { \ - .name = #nm, \ - } - -#define INC_STATS_COUNTER(name) name.value += 1 -#define ADD_STATS_COUNTER(name, x) name.value += (x) -#define SUB_STATS_COUNTER(name, x) name.value -= (x) - -#else /* CONFIG_AMD_IOMMU_STATS */ - -#define DECLARE_STATS_COUNTER(name) -#define INC_STATS_COUNTER(name) -#define ADD_STATS_COUNTER(name, x) -#define SUB_STATS_COUNTER(name, x) - -#endif /* CONFIG_AMD_IOMMU_STATS */ - -#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 5745ce8bf10..bba3cf88e62 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -60,23 +60,24 @@ static inline void native_halt(void) #include <asm/paravirt.h> #else #ifndef __ASSEMBLY__ +#include <linux/types.h> -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { return native_save_fl(); } -static inline void arch_local_irq_restore(unsigned long flags) +static inline notrace void arch_local_irq_restore(unsigned long flags) { native_restore_fl(flags); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { native_irq_disable(); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { native_irq_enable(); } @@ -102,7 +103,7 @@ static inline void halt(void) /* * For spinlocks, etc: */ -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { unsigned long flags = arch_local_save_flags(); arch_local_irq_disable(); diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index b60f2924c41..879fd7d3387 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -61,6 +61,7 @@ hcall(unsigned long call, : "memory"); return call; } +/*:*/ /* Can't use our min() macro here: needs to be a constant */ #define LGUEST_IRQS (NR_IRQS < 32 ? 
NR_IRQS: 32) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index a0a9779084d..3470c9d0ebb 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -388,12 +388,9 @@ do { \ #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) -/* - * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much - * faster than an xchg with forced lock semantics. - */ -#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) -#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) +#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val) +#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val) +#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val) #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) @@ -485,6 +482,8 @@ do { \ #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) +#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) +#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index d9d4dae305f..094fb30817a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); (regs)->bp = caller_frame_pointer(); \ (regs)->cs = __KERNEL_CS; \ regs->flags = 0; \ + asm volatile( \ + _ASM_MOV "%%"_ASM_SP ", %0\n" \ + : "=m" ((regs)->sp) \ + :: "memory" \ + ); \ } #else diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index 56fd9e3abbd..4f7e67e2345 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -102,6 +102,14 @@ #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) /* + * If an event has alias it should be marked + * with a special bit. (Don't forget to check + * P4_PEBS_CONFIG_MASK and related bits on + * modification.) + */ +#define P4_CONFIG_ALIASABLE (1 << 9) + +/* * The bits we allow to pass for RAW events */ #define P4_CONFIG_MASK_ESCR \ @@ -123,6 +131,31 @@ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \ (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) +/* + * In case of event aliasing we need to preserve some + * caller bits, otherwise the mapping won't be complete. 
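The alias handling added in this hunk is a mask discipline: when an event config is rewritten into its aliased encoding, the caller-controlled bits (edge detect, threshold, complement, compare) are carried over through a mask, while ownership and overflow-control bits must come from the alias encoding itself. A minimal user-space sketch of that idea; the CFG_* layout below is invented for illustration and is not the real P4 bit encoding:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout, for illustration only. */
#define CFG_EDGE       (1ULL << 0)
#define CFG_THRESHOLD  (0xfULL << 1)
#define CFG_COMPLEMENT (1ULL << 5)
#define CFG_OVF_PMI    (1ULL << 6)	/* immutable: must not leak between aliases */

#define CFG_ALIAS_MASK (CFG_EDGE | CFG_THRESHOLD | CFG_COMPLEMENT)

/* Rewrite 'config' into the aliased event 'alt', preserving caller bits. */
static uint64_t alias_config(uint64_t config, uint64_t alt)
{
	return (config & CFG_ALIAS_MASK) | (alt & ~CFG_ALIAS_MASK);
}

int main(void)
{
	uint64_t user = CFG_EDGE | (0x3ULL << 1) | CFG_OVF_PMI;
	uint64_t alt  = 0x100;	/* the alias event's base encoding */

	/* Edge/threshold survive; OVF_PMI comes from 'alt', not the caller. */
	printf("aliased config: %#llx\n",
	       (unsigned long long)alias_config(user, alt));
	return 0;
}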
+ */ +#define P4_CONFIG_EVENT_ALIAS_MASK \ + (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \ + p4_config_pack_cccr(P4_CCCR_EDGE | \ + P4_CCCR_THRESHOLD_MASK | \ + P4_CCCR_COMPLEMENT | \ + P4_CCCR_COMPARE)) + +#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \ + ((P4_CONFIG_HT) | \ + p4_config_pack_escr(P4_ESCR_T0_OS | \ + P4_ESCR_T0_USR | \ + P4_ESCR_T1_OS | \ + P4_ESCR_T1_USR) | \ + p4_config_pack_cccr(P4_CCCR_OVF | \ + P4_CCCR_CASCADE | \ + P4_CCCR_FORCE_OVF | \ + P4_CCCR_THREAD_ANY | \ + P4_CCCR_OVF_PMI_T0 | \ + P4_CCCR_OVF_PMI_T1 | \ + P4_CONFIG_ALIASABLE)) + static inline bool p4_is_event_cascaded(u64 config) { u32 cccr = p4_config_unpack_cccr(config); diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index 971e0b46446..df1287019e6 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -30,17 +30,6 @@ extern void add_dtb(u64 data); extern void x86_add_irq_domains(void); void __cpuinit x86_of_pci_init(void); void x86_dtb_init(void); - -static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev ? pdev->dev.of_node : NULL; -} - -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - return pci_device_to_OF_node(bus->self); -} - #else static inline void add_dtb(u64 data) { } static inline void x86_add_irq_domains(void) { } diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 99ddd148a76..36361bf6fdd 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; }; #endif /* CONFIG_X86_WP_WORKS_OK */ +extern unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n); + /* * movsl can be slow when source and dest are not both 8-byte aligned */ diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 4fbda9a3f33..968d57dd54c 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h @@ -14,13 +14,14 @@ static inline int pci_xen_hvm_init(void) } #endif #if defined(CONFIG_XEN_DOM0) -void __init xen_setup_pirqs(void); +int __init pci_xen_initial_domain(void); int xen_find_device_domain_owner(struct pci_dev *dev); int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); int xen_unregister_device_domain_owner(struct pci_dev *dev); #else -static inline void __init xen_setup_pirqs(void) +static inline int __init pci_xen_initial_domain(void) { + return -1; } static inline int xen_find_device_domain_owner(struct pci_dev *dev) { diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 90b06d4daee..11817ff8539 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -123,7 +123,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c deleted file mode 100644 index 7c3a95e54ec..00000000000 --- a/arch/x86/kernel/amd_iommu.c +++ /dev/null @@ -1,2764 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/pci.h> -#include <linux/pci-ats.h> -#include <linux/bitmap.h> -#include <linux/slab.h> -#include <linux/debugfs.h> -#include <linux/scatterlist.h> -#include <linux/dma-mapping.h> -#include <linux/iommu-helper.h> -#include <linux/iommu.h> -#include <linux/delay.h> -#include <asm/proto.h> -#include <asm/iommu.h> -#include <asm/gart.h> -#include <asm/dma.h> -#include <asm/amd_iommu_proto.h> -#include <asm/amd_iommu_types.h> -#include <asm/amd_iommu.h> - -#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) - -#define LOOP_TIMEOUT 100000 - -static DEFINE_RWLOCK(amd_iommu_devtable_lock); - -/* A list of preallocated protection domains */ -static LIST_HEAD(iommu_pd_list); -static DEFINE_SPINLOCK(iommu_pd_list_lock); - -/* - * Domain for untranslated devices - only allocated - * if iommu=pt passed on kernel cmd line. - */ -static struct protection_domain *pt_domain; - -static struct iommu_ops amd_iommu_ops; - -/* - * general struct to manage commands send to an IOMMU - */ -struct iommu_cmd { - u32 data[4]; -}; - -static void update_domain(struct protection_domain *domain); - -/**************************************************************************** - * - * Helper functions - * - ****************************************************************************/ - -static inline u16 get_device_id(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return calc_devid(pdev->bus->number, pdev->devfn); -} - -static struct iommu_dev_data *get_dev_data(struct device *dev) -{ - return dev->archdata.iommu; -} - -/* - * In this function the list of preallocated protection domains is traversed to - * find the domain for a specific device - */ -static struct dma_ops_domain *find_protection_domain(u16 devid) -{ - struct dma_ops_domain *entry, *ret = NULL; - unsigned long flags; - u16 alias = amd_iommu_alias_table[devid]; - - if (list_empty(&iommu_pd_list)) - return NULL; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - - list_for_each_entry(entry, &iommu_pd_list, list) { - if (entry->target_dev == devid || - entry->target_dev == alias) { - ret = entry; - break; - } - } - - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - - return ret; -} - -/* - * This function checks if the driver got a valid device from the caller to - * avoid dereferencing invalid pointers. - */ -static bool check_device(struct device *dev) -{ - u16 devid; - - if (!dev || !dev->dma_mask) - return false; - - /* No device or no PCI device */ - if (dev->bus != &pci_bus_type) - return false; - - devid = get_device_id(dev); - - /* Out of our scope? 
*/ - if (devid > amd_iommu_last_bdf) - return false; - - if (amd_iommu_rlookup_table[devid] == NULL) - return false; - - return true; -} - -static int iommu_init_device(struct device *dev) -{ - struct iommu_dev_data *dev_data; - struct pci_dev *pdev; - u16 devid, alias; - - if (dev->archdata.iommu) - return 0; - - dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); - if (!dev_data) - return -ENOMEM; - - dev_data->dev = dev; - - devid = get_device_id(dev); - alias = amd_iommu_alias_table[devid]; - pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); - if (pdev) - dev_data->alias = &pdev->dev; - else { - kfree(dev_data); - return -ENOTSUPP; - } - - atomic_set(&dev_data->bind, 0); - - dev->archdata.iommu = dev_data; - - - return 0; -} - -static void iommu_ignore_device(struct device *dev) -{ - u16 devid, alias; - - devid = get_device_id(dev); - alias = amd_iommu_alias_table[devid]; - - memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); - memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); - - amd_iommu_rlookup_table[devid] = NULL; - amd_iommu_rlookup_table[alias] = NULL; -} - -static void iommu_uninit_device(struct device *dev) -{ - kfree(dev->archdata.iommu); -} - -void __init amd_iommu_uninit_devices(void) -{ - struct pci_dev *pdev = NULL; - - for_each_pci_dev(pdev) { - - if (!check_device(&pdev->dev)) - continue; - - iommu_uninit_device(&pdev->dev); - } -} - -int __init amd_iommu_init_devices(void) -{ - struct pci_dev *pdev = NULL; - int ret = 0; - - for_each_pci_dev(pdev) { - - if (!check_device(&pdev->dev)) - continue; - - ret = iommu_init_device(&pdev->dev); - if (ret == -ENOTSUPP) - iommu_ignore_device(&pdev->dev); - else if (ret) - goto out_free; - } - - return 0; - -out_free: - - amd_iommu_uninit_devices(); - - return ret; -} -#ifdef CONFIG_AMD_IOMMU_STATS - -/* - * Initialization code for statistics collection - */ - -DECLARE_STATS_COUNTER(compl_wait); -DECLARE_STATS_COUNTER(cnt_map_single); -DECLARE_STATS_COUNTER(cnt_unmap_single); -DECLARE_STATS_COUNTER(cnt_map_sg); -DECLARE_STATS_COUNTER(cnt_unmap_sg); -DECLARE_STATS_COUNTER(cnt_alloc_coherent); -DECLARE_STATS_COUNTER(cnt_free_coherent); -DECLARE_STATS_COUNTER(cross_page); -DECLARE_STATS_COUNTER(domain_flush_single); -DECLARE_STATS_COUNTER(domain_flush_all); -DECLARE_STATS_COUNTER(alloced_io_mem); -DECLARE_STATS_COUNTER(total_map_requests); - -static struct dentry *stats_dir; -static struct dentry *de_fflush; - -static void amd_iommu_stats_add(struct __iommu_counter *cnt) -{ - if (stats_dir == NULL) - return; - - cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir, - &cnt->value); -} - -static void amd_iommu_stats_init(void) -{ - stats_dir = debugfs_create_dir("amd-iommu", NULL); - if (stats_dir == NULL) - return; - - de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, - (u32 *)&amd_iommu_unmap_flush); - - amd_iommu_stats_add(&compl_wait); - amd_iommu_stats_add(&cnt_map_single); - amd_iommu_stats_add(&cnt_unmap_single); - amd_iommu_stats_add(&cnt_map_sg); - amd_iommu_stats_add(&cnt_unmap_sg); - amd_iommu_stats_add(&cnt_alloc_coherent); - amd_iommu_stats_add(&cnt_free_coherent); - amd_iommu_stats_add(&cross_page); - amd_iommu_stats_add(&domain_flush_single); - amd_iommu_stats_add(&domain_flush_all); - amd_iommu_stats_add(&alloced_io_mem); - amd_iommu_stats_add(&total_map_requests); -} - -#endif - -/**************************************************************************** - * - * Interrupt handling functions - * - 
****************************************************************************/ - -static void dump_dte_entry(u16 devid) -{ - int i; - - for (i = 0; i < 8; ++i) - pr_err("AMD-Vi: DTE[%d]: %08x\n", i, - amd_iommu_dev_table[devid].data[i]); -} - -static void dump_command(unsigned long phys_addr) -{ - struct iommu_cmd *cmd = phys_to_virt(phys_addr); - int i; - - for (i = 0; i < 4; ++i) - pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); -} - -static void iommu_print_event(struct amd_iommu *iommu, void *__evt) -{ - u32 *event = __evt; - int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; - int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; - int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK; - int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; - u64 address = (u64)(((u64)event[3]) << 32) | event[2]; - - printk(KERN_ERR "AMD-Vi: Event logged ["); - - switch (type) { - case EVENT_TYPE_ILL_DEV: - printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x " - "address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - dump_dte_entry(devid); - break; - case EVENT_TYPE_IO_FAULT: - printk("IO_PAGE_FAULT device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; - case EVENT_TYPE_DEV_TAB_ERR: - printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " - "address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - break; - case EVENT_TYPE_PAGE_TAB_ERR: - printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; - case EVENT_TYPE_ILL_CMD: - printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); - dump_command(address); - break; - case EVENT_TYPE_CMD_HARD_ERR: - printk("COMMAND_HARDWARE_ERROR address=0x%016llx " - "flags=0x%04x]\n", address, flags); - break; - case EVENT_TYPE_IOTLB_INV_TO: - printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x " - "address=0x%016llx]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address); - break; - case EVENT_TYPE_INV_DEV_REQ: - printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x " - "address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - break; - default: - printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type); - } -} - -static void iommu_poll_events(struct amd_iommu *iommu) -{ - u32 head, tail; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - - head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); - - while (head != tail) { - iommu_print_event(iommu, iommu->evt_buf + head); - head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; - } - - writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - - spin_unlock_irqrestore(&iommu->lock, flags); -} - -irqreturn_t amd_iommu_int_thread(int irq, void *data) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_poll_events(iommu); - - return IRQ_HANDLED; -} - -irqreturn_t amd_iommu_int_handler(int irq, void *data) -{ - return IRQ_WAKE_THREAD; -} - -/**************************************************************************** - * - * IOMMU command queuing functions - * - ****************************************************************************/ - -static int wait_on_sem(volatile u64 *sem) -{ - int i = 0; - - 
while (*sem == 0 && i < LOOP_TIMEOUT) { - udelay(1); - i += 1; - } - - if (i == LOOP_TIMEOUT) { - pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); - return -EIO; - } - - return 0; -} - -static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) -{ - u8 *target; - - target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - - /* Copy command to buffer */ - memcpy(target, cmd, sizeof(*cmd)); - - /* Tell the IOMMU about it */ - writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); -} - -static void build_completion_wait(struct iommu_cmd *cmd, u64 address) -{ - WARN_ON(address & 0x7ULL); - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(__pa(address)); - cmd->data[2] = 1; - CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); -} - -static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); -} - -static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - size_t size, u16 domid, int pde) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[1] |= domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - if (s) /* size bit - we flush more than one 4kb page */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; -} - -static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, - u64 address, size_t size) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - cmd->data[0] |= (qdep & 0xff) << 24; - cmd->data[1] = devid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); - if (s) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; -} - -static void build_inv_all(struct iommu_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - CMD_SET_TYPE(cmd, CMD_INV_ALL); -} - -/* - * Writes the command to the IOMMU's command buffer and informs the - * hardware about the new command.
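The command buffer written by copy_cmd_to_buffer is a classic producer ring: the driver copies one fixed-size command at the tail, advances the tail modulo the buffer size, and publishes the new tail to a hardware register so the IOMMU (the consumer) can catch up. A stand-alone sketch of that wraparound arithmetic, with plain indices standing in for the MMIO head/tail registers and a simplified full-ring check compared to the driver's:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CMD_SIZE	16		/* one command is four u32s */
#define BUF_SIZE	(8 * CMD_SIZE)	/* must be a multiple of CMD_SIZE */

struct ring {
	uint8_t buf[BUF_SIZE];
	uint32_t head;	/* consumer position (hardware-owned in the driver) */
	uint32_t tail;	/* producer position */
};

/* Refuse to let the tail catch up with the head; returns 0 on success. */
static int ring_post(struct ring *r, const void *cmd)
{
	uint32_t next_tail = (r->tail + CMD_SIZE) % BUF_SIZE;

	if (next_tail == r->head)
		return -1;	/* full: caller must drain and retry */

	memcpy(r->buf + r->tail, cmd, CMD_SIZE);
	r->tail = next_tail;	/* in the driver: writel(tail, MMIO_CMD_TAIL) */
	return 0;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };
	uint8_t cmd[CMD_SIZE] = { 0 };
	int posted = 0;

	while (ring_post(&r, cmd) == 0)
		posted++;
	printf("posted %d commands before the ring filled\n", posted);
	return 0;
}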
- */ -static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) -{ - u32 left, tail, head, next_tail; - unsigned long flags; - - WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); - -again: - spin_lock_irqsave(&iommu->lock, flags); - - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - left = (head - next_tail) % iommu->cmd_buf_size; - - if (left <= 2) { - struct iommu_cmd sync_cmd; - volatile u64 sem = 0; - int ret; - - build_completion_wait(&sync_cmd, (u64)&sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); - - spin_unlock_irqrestore(&iommu->lock, flags); - - if ((ret = wait_on_sem(&sem)) != 0) - return ret; - - goto again; - } - - copy_cmd_to_buffer(iommu, cmd, tail); - - /* We need to sync now to make sure all commands are processed */ - iommu->need_sync = true; - - spin_unlock_irqrestore(&iommu->lock, flags); - - return 0; -} - -/* - * This function queues a completion wait command into the command - * buffer of an IOMMU - */ -static int iommu_completion_wait(struct amd_iommu *iommu) -{ - struct iommu_cmd cmd; - volatile u64 sem = 0; - int ret; - - if (!iommu->need_sync) - return 0; - - build_completion_wait(&cmd, (u64)&sem); - - ret = iommu_queue_command(iommu, &cmd); - if (ret) - return ret; - - return wait_on_sem(&sem); -} - -static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) -{ - struct iommu_cmd cmd; - - build_inv_dte(&cmd, devid); - - return iommu_queue_command(iommu, &cmd); -} - -static void iommu_flush_dte_all(struct amd_iommu *iommu) -{ - u32 devid; - - for (devid = 0; devid <= 0xffff; ++devid) - iommu_flush_dte(iommu, devid); - - iommu_completion_wait(iommu); -} - -/* - * This function uses heavy locking and may disable irqs for some time. But - * this is no issue because it is only called during resume. 
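A completion-wait behaves like a one-shot semaphore: the CPU queues a COMPL_WAIT command pointing at a zeroed u64, the IOMMU stores a value there once every prior command has finished, and the CPU spins on the location with a bounded timeout. A user-space model of wait_on_sem's polling loop, with a thread standing in for the IOMMU's store and usleep replacing udelay (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define LOOP_TIMEOUT 100000

static volatile unsigned long long sem;

/* Stand-in for the IOMMU: drain "prior commands", then store to the
 * semaphore location, like a COMPL_WAIT command with the store flag. */
static void *fake_hw(void *arg)
{
	(void)arg;
	usleep(1000);
	sem = 1;
	return NULL;
}

/* Bounded busy-wait, same shape as the driver's wait_on_sem(). */
static int wait_on_sem(volatile unsigned long long *s)
{
	int i = 0;

	while (*s == 0 && i < LOOP_TIMEOUT) {
		usleep(1);
		i++;
	}
	return (i == LOOP_TIMEOUT) ? -1 : 0;
}

int main(void)
{
	pthread_t hw;

	pthread_create(&hw, NULL, fake_hw, NULL);
	printf("wait_on_sem: %s\n", wait_on_sem(&sem) ? "timed out" : "completed");
	pthread_join(hw, NULL);
	return 0;
}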
- */ -static void iommu_flush_tlb_all(struct amd_iommu *iommu) -{ - u32 dom_id; - - for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { - struct iommu_cmd cmd; - build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); - iommu_queue_command(iommu, &cmd); - } - - iommu_completion_wait(iommu); -} - -static void iommu_flush_all(struct amd_iommu *iommu) -{ - struct iommu_cmd cmd; - - build_inv_all(&cmd); - - iommu_queue_command(iommu, &cmd); - iommu_completion_wait(iommu); -} - -void iommu_flush_all_caches(struct amd_iommu *iommu) -{ - if (iommu_feature(iommu, FEATURE_IA)) { - iommu_flush_all(iommu); - } else { - iommu_flush_dte_all(iommu); - iommu_flush_tlb_all(iommu); - } -} - -/* - * Command send function for flushing on-device TLB - */ -static int device_flush_iotlb(struct device *dev, u64 address, size_t size) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct amd_iommu *iommu; - struct iommu_cmd cmd; - u16 devid; - int qdep; - - qdep = pci_ats_queue_depth(pdev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - build_inv_iotlb_pages(&cmd, devid, qdep, address, size); - - return iommu_queue_command(iommu, &cmd); -} - -/* - * Command send function for invalidating a device table entry - */ -static int device_flush_dte(struct device *dev) -{ - struct amd_iommu *iommu; - struct pci_dev *pdev; - u16 devid; - int ret; - - pdev = to_pci_dev(dev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - ret = iommu_flush_dte(iommu, devid); - if (ret) - return ret; - - if (pci_ats_enabled(pdev)) - ret = device_flush_iotlb(dev, 0, ~0UL); - - return ret; -} - -/* - * TLB invalidation function which is called from the mapping functions. - * It invalidates a single PTE if the range to flush is within a single - * page. Otherwise it flushes the whole TLB of the IOMMU. - */ -static void __domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) -{ - struct iommu_dev_data *dev_data; - struct iommu_cmd cmd; - int ret = 0, i; - - build_inv_iommu_pages(&cmd, address, size, domain->id, pde); - - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; - - /* - * Devices of this domain are behind this IOMMU - * We need a TLB flush - */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); - } - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - - if (!pci_ats_enabled(pdev)) - continue; - - ret |= device_flush_iotlb(dev_data->dev, address, size); - } - - WARN_ON(ret); -} - -static void domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size) -{ - __domain_flush_pages(domain, address, size, 0); -} - -/* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); -} - -/* Flush the whole IO/TLB for a given protection domain - including PDE */ -static void domain_flush_tlb_pde(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); -} - -static void domain_flush_complete(struct protection_domain *domain) -{ - int i; - - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; - - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. 
- */ - iommu_completion_wait(amd_iommus[i]); - } -} - - -/* - * This function flushes the DTEs for all devices in domain - */ -static void domain_flush_devices(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); - - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data->dev); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/**************************************************************************** - * - * The functions below are used the create the page table mappings for - * unity mapped regions. - * - ****************************************************************************/ - -/* - * This function is used to add another level to an IO page table. Adding - * another level increases the size of the address space by 9 bits to a size up - * to 64 bits. - */ -static bool increase_address_space(struct protection_domain *domain, - gfp_t gfp) -{ - u64 *pte; - - if (domain->mode == PAGE_MODE_6_LEVEL) - /* address space already 64 bit large */ - return false; - - pte = (void *)get_zeroed_page(gfp); - if (!pte) - return false; - - *pte = PM_LEVEL_PDE(domain->mode, - virt_to_phys(domain->pt_root)); - domain->pt_root = pte; - domain->mode += 1; - domain->updated = true; - - return true; -} - -static u64 *alloc_pte(struct protection_domain *domain, - unsigned long address, - unsigned long page_size, - u64 **pte_page, - gfp_t gfp) -{ - int level, end_lvl; - u64 *pte, *page; - - BUG_ON(!is_power_of_2(page_size)); - - while (address > PM_LEVEL_SIZE(domain->mode)) - increase_address_space(domain, gfp); - - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - address = PAGE_SIZE_ALIGN(address, page_size); - end_lvl = PAGE_SIZE_LEVEL(page_size); - - while (level > end_lvl) { - if (!IOMMU_PTE_PRESENT(*pte)) { - page = (u64 *)get_zeroed_page(gfp); - if (!page) - return NULL; - *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); - } - - /* No level skipping support yet */ - if (PM_PTE_LEVEL(*pte) != level) - return NULL; - - level -= 1; - - pte = IOMMU_PTE_PAGE(*pte); - - if (pte_page && level == end_lvl) - *pte_page = pte; - - pte = &pte[PM_LEVEL_INDEX(level, address)]; - } - - return pte; -} - -/* - * This function checks if there is a PTE for a given dma address. If - * there is one, it returns the pointer to it. - */ -static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) -{ - int level; - u64 *pte; - - if (address > PM_LEVEL_SIZE(domain->mode)) - return NULL; - - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - - while (level > 0) { - - /* Not Present */ - if (!IOMMU_PTE_PRESENT(*pte)) - return NULL; - - /* Large PTE */ - if (PM_PTE_LEVEL(*pte) == 0x07) { - unsigned long pte_mask, __pte; - - /* - * If we have a series of large PTEs, make - * sure to return a pointer to the first one. - */ - pte_mask = PTE_PAGE_SIZE(*pte); - pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); - __pte = ((unsigned long)pte) & pte_mask; - - return (u64 *)__pte; - } - - /* No level skipping support yet */ - if (PM_PTE_LEVEL(*pte) != level) - return NULL; - - level -= 1; - - /* Walk to the next level */ - pte = IOMMU_PTE_PAGE(*pte); - pte = &pte[PM_LEVEL_INDEX(level, address)]; - } - - return pte; -} - -/* - * Generic mapping functions. It maps a physical address into a DMA - * address space. It allocates the page table pages if necessary. 
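alloc_pte and fetch_pte above walk a radix tree in which each level decodes nine bits of the address, so adding a level multiplies the addressable space by 512. A compact user-space model of that walk, using calloc'd 512-entry tables and a fixed level count; the real driver grows levels on demand and packs permission flags into the entries:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define ENTRIES		512			/* 9 address bits per level */
#define LVL_SHIFT(l)	(12 + 9 * (l))
#define LVL_INDEX(l, a)	(((a) >> LVL_SHIFT(l)) & (ENTRIES - 1))

static uint64_t *alloc_table(void)
{
	return calloc(ENTRIES, sizeof(uint64_t));
}

/* Walk from the top level down to 0, allocating missing tables on the
 * way, and return a pointer to the level-0 slot for 'addr'. */
static uint64_t *alloc_pte(uint64_t *root, unsigned long addr, int levels)
{
	uint64_t *table = root;
	int level;

	for (level = levels - 1; level > 0; level--) {
		unsigned long idx = LVL_INDEX(level, addr);

		/* Entries hold the next table's address, as PDEs do. */
		if (!table[idx])
			table[idx] = (uint64_t)(uintptr_t)alloc_table();
		table = (uint64_t *)(uintptr_t)table[idx];
	}
	return &table[LVL_INDEX(0, addr)];
}

int main(void)
{
	uint64_t *root = alloc_table();

	*alloc_pte(root, 0x12345000UL, 3) = 0xabcd;	/* "phys addr" + flags */
	printf("pte = %#llx\n",
	       (unsigned long long)*alloc_pte(root, 0x12345000UL, 3));
	return 0;
}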
- * In the future it can be extended to a generic mapping function - * supporting all features of AMD IOMMU page tables like level skipping - * and full 64 bit address spaces. - */ -static int iommu_map_page(struct protection_domain *dom, - unsigned long bus_addr, - unsigned long phys_addr, - int prot, - unsigned long page_size) -{ - u64 __pte, *pte; - int i, count; - - if (!(prot & IOMMU_PROT_MASK)) - return -EINVAL; - - bus_addr = PAGE_ALIGN(bus_addr); - phys_addr = PAGE_ALIGN(phys_addr); - count = PAGE_SIZE_PTE_COUNT(page_size); - pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); - - for (i = 0; i < count; ++i) - if (IOMMU_PTE_PRESENT(pte[i])) - return -EBUSY; - - if (page_size > PAGE_SIZE) { - __pte = PAGE_SIZE_PTE(phys_addr, page_size); - __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; - } else - __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC; - - if (prot & IOMMU_PROT_IR) - __pte |= IOMMU_PTE_IR; - if (prot & IOMMU_PROT_IW) - __pte |= IOMMU_PTE_IW; - - for (i = 0; i < count; ++i) - pte[i] = __pte; - - update_domain(dom); - - return 0; -} - -static unsigned long iommu_unmap_page(struct protection_domain *dom, - unsigned long bus_addr, - unsigned long page_size) -{ - unsigned long long unmap_size, unmapped; - u64 *pte; - - BUG_ON(!is_power_of_2(page_size)); - - unmapped = 0; - - while (unmapped < page_size) { - - pte = fetch_pte(dom, bus_addr); - - if (!pte) { - /* - * No PTE for this address - * move forward in 4kb steps - */ - unmap_size = PAGE_SIZE; - } else if (PM_PTE_LEVEL(*pte) == 0) { - /* 4kb PTE found for this address */ - unmap_size = PAGE_SIZE; - *pte = 0ULL; - } else { - int count, i; - - /* Large PTE found which maps this address */ - unmap_size = PTE_PAGE_SIZE(*pte); - count = PAGE_SIZE_PTE_COUNT(unmap_size); - for (i = 0; i < count; i++) - pte[i] = 0ULL; - } - - bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; - unmapped += unmap_size; - } - - BUG_ON(!is_power_of_2(unmapped)); - - return unmapped; -} - -/* - * This function checks if a specific unity mapping entry is needed for - * this specific IOMMU. - */ -static int iommu_for_unity_map(struct amd_iommu *iommu, - struct unity_map_entry *entry) -{ - u16 bdf, i; - - for (i = entry->devid_start; i <= entry->devid_end; ++i) { - bdf = amd_iommu_alias_table[i]; - if (amd_iommu_rlookup_table[bdf] == iommu) - return 1; - } - - return 0; -} - -/* - * This function actually applies the mapping to the page table of the - * dma_ops domain. - */ -static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, - struct unity_map_entry *e) -{ - u64 addr; - int ret; - - for (addr = e->address_start; addr < e->address_end; - addr += PAGE_SIZE) { - ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, - PAGE_SIZE); - if (ret) - return ret; - /* - * if unity mapping is in aperture range mark the page - * as allocated in the aperture - */ - if (addr < dma_dom->aperture_size) - __set_bit(addr >> PAGE_SHIFT, - dma_dom->aperture[0]->bitmap); - } - - return 0; -} - -/* - * Init the unity mappings for a specific IOMMU in the system - * - * Basically iterates over all unity mapping entries and applies them to - * the default domain DMA of that IOMMU if necessary. 
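A unity mapping is simply an identity mapping applied page by page over the range the ACPI table requested: every DMA address in [start, end) translates to the same physical address. A sketch of dma_ops_unity_map's loop against a hypothetical map_page(bus, phys, prot) backend, stopping on the first error as the driver does:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical backend: in the driver this is iommu_map_page(). */
static int map_page(unsigned long bus, unsigned long phys, int prot)
{
	printf("map %#lx -> %#lx (prot %d)\n", bus, phys, prot);
	return 0;
}

/* Identity-map [start, end) one page at a time. */
static int unity_map(unsigned long start, unsigned long end, int prot)
{
	unsigned long addr;
	int ret;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ret = map_page(addr, addr, prot);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return unity_map(0x100000, 0x100000 + 4 * PAGE_SIZE, 3);
}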
- */ -static int iommu_init_unity_mappings(struct amd_iommu *iommu) -{ - struct unity_map_entry *entry; - int ret; - - list_for_each_entry(entry, &amd_iommu_unity_map, list) { - if (!iommu_for_unity_map(iommu, entry)) - continue; - ret = dma_ops_unity_map(iommu->default_dom, entry); - if (ret) - return ret; - } - - return 0; -} - -/* - * Initializes the unity mappings required for a specific device - */ -static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, - u16 devid) -{ - struct unity_map_entry *e; - int ret; - - list_for_each_entry(e, &amd_iommu_unity_map, list) { - if (!(devid >= e->devid_start && devid <= e->devid_end)) - continue; - ret = dma_ops_unity_map(dma_dom, e); - if (ret) - return ret; - } - - return 0; -} - -/**************************************************************************** - * - * The next functions belong to the address allocator for the dma_ops - * interface functions. They work like the allocators in the other IOMMU - * drivers. It is basically a bitmap which marks the allocated pages in - * the aperture. Maybe it could be enhanced in the future to a more - * efficient allocator. - * - ****************************************************************************/ - -/* - * The address allocator core functions. - * - * called with domain->lock held - */ - -/* - * Used to reserve address ranges in the aperture (e.g. for exclusion - * ranges). - */ -static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, - unsigned long start_page, - unsigned int pages) -{ - unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT; - - if (start_page + pages > last_page) - pages = last_page - start_page; - - for (i = start_page; i < start_page + pages; ++i) { - int index = i / APERTURE_RANGE_PAGES; - int page = i % APERTURE_RANGE_PAGES; - __set_bit(page, dom->aperture[index]->bitmap); - } -} - -/* - * This function is used to add a new aperture range to an existing - * aperture in case of dma_ops domain allocation or address allocation - * failure.
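The aperture allocator is page-granular: reserving a range marks the corresponding pages used, and allocation searches for a run of free pages. A small sketch of both halves, using a single flat byte-per-page map for brevity where the driver packs bits and splits the aperture into 128 MB ranges:

#include <stdio.h>
#include <string.h>

#define APERTURE_PAGES 1024

static unsigned char map[APERTURE_PAGES];	/* one byte per page, 0 = free */

static void reserve(unsigned long start_page, unsigned int pages)
{
	if (start_page + pages > APERTURE_PAGES)
		pages = APERTURE_PAGES - start_page;
	memset(map + start_page, 1, pages);
}

/* First-fit search for 'pages' consecutive free pages; -1 if none. */
static long alloc_pages(unsigned int pages)
{
	unsigned long i, run = 0;

	for (i = 0; i < APERTURE_PAGES; i++) {
		run = map[i] ? 0 : run + 1;
		if (run == pages) {
			unsigned long start = i - pages + 1;
			memset(map + start, 1, pages);
			return start;
		}
	}
	return -1;
}

int main(void)
{
	reserve(0, 1);			/* never hand out page 0 */
	reserve(16, 8);			/* e.g. an exclusion range */
	printf("got pages starting at %ld\n", alloc_pages(20));
	return 0;
}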
- */ -static int alloc_new_range(struct dma_ops_domain *dma_dom, - bool populate, gfp_t gfp) -{ - int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - struct amd_iommu *iommu; - unsigned long i; - -#ifdef CONFIG_IOMMU_STRESS - populate = false; -#endif - - if (index >= APERTURE_MAX_RANGES) - return -ENOMEM; - - dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); - if (!dma_dom->aperture[index]) - return -ENOMEM; - - dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); - if (!dma_dom->aperture[index]->bitmap) - goto out_free; - - dma_dom->aperture[index]->offset = dma_dom->aperture_size; - - if (populate) { - unsigned long address = dma_dom->aperture_size; - int i, num_ptes = APERTURE_RANGE_PAGES / 512; - u64 *pte, *pte_page; - - for (i = 0; i < num_ptes; ++i) { - pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, - &pte_page, gfp); - if (!pte) - goto out_free; - - dma_dom->aperture[index]->pte_pages[i] = pte_page; - - address += APERTURE_RANGE_SIZE / 64; - } - } - - dma_dom->aperture_size += APERTURE_RANGE_SIZE; - - /* Initialize the exclusion range if necessary */ - for_each_iommu(iommu) { - if (iommu->exclusion_start && - iommu->exclusion_start >= dma_dom->aperture[index]->offset - && iommu->exclusion_start < dma_dom->aperture_size) { - unsigned long startpage; - int pages = iommu_num_pages(iommu->exclusion_start, - iommu->exclusion_length, - PAGE_SIZE); - startpage = iommu->exclusion_start >> PAGE_SHIFT; - dma_ops_reserve_addresses(dma_dom, startpage, pages); - } - } - - /* - * Check for areas already mapped as present in the new aperture - * range and mark those pages as reserved in the allocator. Such - * mappings may already exist as a result of requested unity - * mappings for devices. - */ - for (i = dma_dom->aperture[index]->offset; - i < dma_dom->aperture_size; - i += PAGE_SIZE) { - u64 *pte = fetch_pte(&dma_dom->domain, i); - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - continue; - - dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); - } - - update_domain(&dma_dom->domain); - - return 0; - -out_free: - update_domain(&dma_dom->domain); - - free_page((unsigned long)dma_dom->aperture[index]->bitmap); - - kfree(dma_dom->aperture[index]); - dma_dom->aperture[index] = NULL; - - return -ENOMEM; -} - -static unsigned long dma_ops_area_alloc(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 dma_mask, - unsigned long start) -{ - unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE; - int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; - int i = start >> APERTURE_RANGE_SHIFT; - unsigned long boundary_size; - unsigned long address = -1; - unsigned long limit; - - next_bit >>= PAGE_SHIFT; - - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - PAGE_SIZE) >> PAGE_SHIFT; - - for (;i < max_index; ++i) { - unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT; - - if (dom->aperture[i]->offset >= dma_mask) - break; - - limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, - dma_mask >> PAGE_SHIFT); - - address = iommu_area_alloc(dom->aperture[i]->bitmap, - limit, next_bit, pages, 0, - boundary_size, align_mask); - if (address != -1) { - address = dom->aperture[i]->offset + - (address << PAGE_SHIFT); - dom->next_address = address + (pages << PAGE_SHIFT); - break; - } - - next_bit = 0; - } - - return address; -} - -static unsigned long dma_ops_alloc_addresses(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 
dma_mask) -{ - unsigned long address; - -#ifdef CONFIG_IOMMU_STRESS - dom->next_address = 0; - dom->need_flush = true; -#endif - - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, dom->next_address); - - if (address == -1) { - dom->next_address = 0; - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, 0); - dom->need_flush = true; - } - - if (unlikely(address == -1)) - address = DMA_ERROR_CODE; - - WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); - - return address; -} - -/* - * The address free function. - * - * called with domain->lock held - */ -static void dma_ops_free_addresses(struct dma_ops_domain *dom, - unsigned long address, - unsigned int pages) -{ - unsigned i = address >> APERTURE_RANGE_SHIFT; - struct aperture_range *range = dom->aperture[i]; - - BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL); - -#ifdef CONFIG_IOMMU_STRESS - if (i < 4) - return; -#endif - - if (address >= dom->next_address) - dom->need_flush = true; - - address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; - - bitmap_clear(range->bitmap, address, pages); - -} - -/**************************************************************************** - * - * The next functions belong to the domain allocation. A domain is - * allocated for every IOMMU as the default domain. If device isolation - * is enabled, every device get its own domain. The most important thing - * about domains is the page table mapping the DMA address space they - * contain. - * - ****************************************************************************/ - -/* - * This function adds a protection domain to the global protection domain list - */ -static void add_domain_to_list(struct protection_domain *domain) -{ - unsigned long flags; - - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_add(&domain->list, &amd_iommu_pd_list); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); -} - -/* - * This function removes a protection domain to the global - * protection domain list - */ -static void del_domain_from_list(struct protection_domain *domain) -{ - unsigned long flags; - - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_del(&domain->list); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); -} - -static u16 domain_id_alloc(void) -{ - unsigned long flags; - int id; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID); - BUG_ON(id == 0); - if (id > 0 && id < MAX_DOMAIN_ID) - __set_bit(id, amd_iommu_pd_alloc_bitmap); - else - id = 0; - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - return id; -} - -static void domain_id_free(int id) -{ - unsigned long flags; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - if (id > 0 && id < MAX_DOMAIN_ID) - __clear_bit(id, amd_iommu_pd_alloc_bitmap); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); -} - -static void free_pagetable(struct protection_domain *domain) -{ - int i, j; - u64 *p1, *p2, *p3; - - p1 = domain->pt_root; - - if (!p1) - return; - - for (i = 0; i < 512; ++i) { - if (!IOMMU_PTE_PRESENT(p1[i])) - continue; - - p2 = IOMMU_PTE_PAGE(p1[i]); - for (j = 0; j < 512; ++j) { - if (!IOMMU_PTE_PRESENT(p2[j])) - continue; - p3 = IOMMU_PTE_PAGE(p2[j]); - free_page((unsigned long)p3); - } - - free_page((unsigned long)p2); - } - - free_page((unsigned long)p1); - - domain->pt_root = NULL; -} - -/* - * Free a domain, only used if something went wrong in the - * allocation path and we need to free an already allocated page table - */ -static 
void dma_ops_domain_free(struct dma_ops_domain *dom) -{ - int i; - - if (!dom) - return; - - del_domain_from_list(&dom->domain); - - free_pagetable(&dom->domain); - - for (i = 0; i < APERTURE_MAX_RANGES; ++i) { - if (!dom->aperture[i]) - continue; - free_page((unsigned long)dom->aperture[i]->bitmap); - kfree(dom->aperture[i]); - } - - kfree(dom); -} - -/* - * Allocates a new protection domain usable for the dma_ops functions. - * It also initializes the page table and the address allocator data - * structures required for the dma_ops interface - */ -static struct dma_ops_domain *dma_ops_domain_alloc(void) -{ - struct dma_ops_domain *dma_dom; - - dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); - if (!dma_dom) - return NULL; - - spin_lock_init(&dma_dom->domain.lock); - - dma_dom->domain.id = domain_id_alloc(); - if (dma_dom->domain.id == 0) - goto free_dma_dom; - INIT_LIST_HEAD(&dma_dom->domain.dev_list); - dma_dom->domain.mode = PAGE_MODE_2_LEVEL; - dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); - dma_dom->domain.flags = PD_DMA_OPS_MASK; - dma_dom->domain.priv = dma_dom; - if (!dma_dom->domain.pt_root) - goto free_dma_dom; - - dma_dom->need_flush = false; - dma_dom->target_dev = 0xffff; - - add_domain_to_list(&dma_dom->domain); - - if (alloc_new_range(dma_dom, true, GFP_KERNEL)) - goto free_dma_dom; - - /* - * mark the first page as allocated so we never return 0 as - * a valid dma-address. So we can use 0 as error value - */ - dma_dom->aperture[0]->bitmap[0] = 1; - dma_dom->next_address = 0; - - - return dma_dom; - -free_dma_dom: - dma_ops_domain_free(dma_dom); - - return NULL; -} - -/* - * little helper function to check whether a given protection domain is a - * dma_ops domain - */ -static bool dma_ops_domain(struct protection_domain *domain) -{ - return domain->flags & PD_DMA_OPS_MASK; -} - -static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) -{ - u64 pte_root = virt_to_phys(domain->pt_root); - u32 flags = 0; - - pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) - << DEV_ENTRY_MODE_SHIFT; - pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - - if (ats) - flags |= DTE_FLAG_IOTLB; - - amd_iommu_dev_table[devid].data[3] |= flags; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); -} - -static void clear_dte_entry(u16 devid) -{ - /* remove entry from the device table seen by the hardware */ - amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; - amd_iommu_dev_table[devid].data[1] = 0; - amd_iommu_dev_table[devid].data[2] = 0; - - amd_iommu_apply_erratum_63(devid); -} - -static void do_attach(struct device *dev, struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - struct pci_dev *pdev; - bool ats = false; - u16 devid; - - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); - - if (amd_iommu_iotlb_sup) - ats = pci_ats_enabled(pdev); - - /* Update data structures */ - dev_data->domain = domain; - list_add(&dev_data->list, &domain->dev_list); - set_dte_entry(devid, domain, ats); - - /* Do reference counting */ - domain->dev_iommu[iommu->index] += 1; - domain->dev_cnt += 1; - - /* Flush the DTE entry */ - device_flush_dte(dev); -} - -static void do_detach(struct device *dev) -{ - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - u16 devid; - - 
devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - dev_data = get_dev_data(dev); - - /* decrease reference counters */ - dev_data->domain->dev_iommu[iommu->index] -= 1; - dev_data->domain->dev_cnt -= 1; - - /* Update data structures */ - dev_data->domain = NULL; - list_del(&dev_data->list); - clear_dte_entry(devid); - - /* Flush the DTE entry */ - device_flush_dte(dev); -} - -/* - * If a device is not yet associated with a domain, this function does - * assigns it visible for the hardware - */ -static int __attach_device(struct device *dev, - struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data, *alias_data; - int ret; - - dev_data = get_dev_data(dev); - alias_data = get_dev_data(dev_data->alias); - - if (!alias_data) - return -EINVAL; - - /* lock domain */ - spin_lock(&domain->lock); - - /* Some sanity checks */ - ret = -EBUSY; - if (alias_data->domain != NULL && - alias_data->domain != domain) - goto out_unlock; - - if (dev_data->domain != NULL && - dev_data->domain != domain) - goto out_unlock; - - /* Do real assignment */ - if (dev_data->alias != dev) { - alias_data = get_dev_data(dev_data->alias); - if (alias_data->domain == NULL) - do_attach(dev_data->alias, domain); - - atomic_inc(&alias_data->bind); - } - - if (dev_data->domain == NULL) - do_attach(dev, domain); - - atomic_inc(&dev_data->bind); - - ret = 0; - -out_unlock: - - /* ready */ - spin_unlock(&domain->lock); - - return ret; -} - -/* - * If a device is not yet associated with a domain, this function does - * assigns it visible for the hardware - */ -static int attach_device(struct device *dev, - struct protection_domain *domain) -{ - struct pci_dev *pdev = to_pci_dev(dev); - unsigned long flags; - int ret; - - if (amd_iommu_iotlb_sup) - pci_enable_ats(pdev, PAGE_SHIFT); - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - ret = __attach_device(dev, domain); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - /* - * We might boot into a crash-kernel here. The crashed kernel - * left the caches in the IOMMU dirty. So we have to flush - * here to evict all dirty stuff. - */ - domain_flush_tlb_pde(domain); - - return ret; -} - -/* - * Removes a device from a protection domain (unlocked) - */ -static void __detach_device(struct device *dev) -{ - struct iommu_dev_data *dev_data = get_dev_data(dev); - struct iommu_dev_data *alias_data; - struct protection_domain *domain; - unsigned long flags; - - BUG_ON(!dev_data->domain); - - domain = dev_data->domain; - - spin_lock_irqsave(&domain->lock, flags); - - if (dev_data->alias != dev) { - alias_data = get_dev_data(dev_data->alias); - if (atomic_dec_and_test(&alias_data->bind)) - do_detach(dev_data->alias); - } - - if (atomic_dec_and_test(&dev_data->bind)) - do_detach(dev); - - spin_unlock_irqrestore(&domain->lock, flags); - - /* - * If we run in passthrough mode the device must be assigned to the - * passthrough domain if it is detached from any other domain. - * Make sure we can deassign from the pt_domain itself. 
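Because a device and its PCI alias can share a domain binding, attach and detach are reference counted: only the first reference programs the device table entry and only the last one clears it, which is what the atomic_inc/atomic_dec_and_test pairs above implement. A sketch of that bind counting with C11 atomics; dte_set and dte_clear are stand-ins for set_dte_entry and clear_dte_entry:

#include <stdatomic.h>
#include <stdio.h>

struct dev_binding {
	atomic_int bind;	/* how many attach paths hold this device */
	int has_dte;		/* device table entry currently programmed? */
};

static void dte_set(struct dev_binding *d)   { d->has_dte = 1; }
static void dte_clear(struct dev_binding *d) { d->has_dte = 0; }

static void attach(struct dev_binding *d)
{
	/* First reference programs the hardware view. */
	if (atomic_fetch_add(&d->bind, 1) == 0)
		dte_set(d);
}

static void detach(struct dev_binding *d)
{
	/* Last reference tears it down, like atomic_dec_and_test(). */
	if (atomic_fetch_sub(&d->bind, 1) == 1)
		dte_clear(d);
}

int main(void)
{
	struct dev_binding dev = { .bind = 0, .has_dte = 0 };

	attach(&dev);	/* the device itself */
	attach(&dev);	/* its alias */
	detach(&dev);
	printf("after one detach: dte %s\n", dev.has_dte ? "set" : "clear");
	detach(&dev);
	printf("after both: dte %s\n", dev.has_dte ? "set" : "clear");
	return 0;
}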
- */ - if (iommu_pass_through && - (dev_data->domain == NULL && domain != pt_domain)) - __attach_device(dev, pt_domain); -} - -/* - * Removes a device from a protection domain (with devtable_lock held) - */ -static void detach_device(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - unsigned long flags; - - /* lock device table */ - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - __detach_device(dev); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) - pci_disable_ats(pdev); -} - -/* - * Find out the protection domain structure for a given PCI device. This - * will give us the pointer to the page table root for example. - */ -static struct protection_domain *domain_for_device(struct device *dev) -{ - struct protection_domain *dom; - struct iommu_dev_data *dev_data, *alias_data; - unsigned long flags; - u16 devid; - - devid = get_device_id(dev); - dev_data = get_dev_data(dev); - alias_data = get_dev_data(dev_data->alias); - if (!alias_data) - return NULL; - - read_lock_irqsave(&amd_iommu_devtable_lock, flags); - dom = dev_data->domain; - if (dom == NULL && - alias_data->domain != NULL) { - __attach_device(dev, alias_data->domain); - dom = alias_data->domain; - } - - read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - return dom; -} - -static int device_change_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - u16 devid; - struct protection_domain *domain; - struct dma_ops_domain *dma_domain; - struct amd_iommu *iommu; - unsigned long flags; - - if (!check_device(dev)) - return 0; - - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - switch (action) { - case BUS_NOTIFY_UNBOUND_DRIVER: - - domain = domain_for_device(dev); - - if (!domain) - goto out; - if (iommu_pass_through) - break; - detach_device(dev); - break; - case BUS_NOTIFY_ADD_DEVICE: - - iommu_init_device(dev); - - domain = domain_for_device(dev); - - /* allocate a protection domain if a device is added */ - dma_domain = find_protection_domain(devid); - if (dma_domain) - goto out; - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) - goto out; - dma_domain->target_dev = devid; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - list_add_tail(&dma_domain->list, &iommu_pd_list); - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - - break; - case BUS_NOTIFY_DEL_DEVICE: - - iommu_uninit_device(dev); - - default: - goto out; - } - - device_flush_dte(dev); - iommu_completion_wait(iommu); - -out: - return 0; -} - -static struct notifier_block device_nb = { - .notifier_call = device_change_notifier, -}; - -void amd_iommu_init_notifier(void) -{ - bus_register_notifier(&pci_bus_type, &device_nb); -} - -/***************************************************************************** - * - * The next functions belong to the dma_ops mapping/unmapping code. - * - *****************************************************************************/ - -/* - * In the dma_ops path we only have the struct device. This function - * finds the corresponding IOMMU, the protection domain and the - * requestor id for a given device. - * If the device is not yet associated with a domain this is also done - * in this function. 
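The lookup described here is lazy: if the device already has a domain it is returned, otherwise the device is bound on first use, falling back to the IOMMU's default dma_ops domain when no preallocated one matches. A sketch of that find-or-bind shape; find_prealloc, default_dom, and attach are hypothetical stand-ins for the driver's helpers:

#include <stdio.h>
#include <stddef.h>

struct domain { int id; };

static struct domain default_dom = { .id = 1 };

struct device {
	struct domain *dom;	/* NULL until the first DMA operation */
	int devid;
};

/* Hypothetical: a preallocated per-device domain, if one was set up. */
static struct domain *find_prealloc(int devid)
{
	(void)devid;
	return NULL;		/* none in this toy setup */
}

static void attach(struct device *dev, struct domain *dom)
{
	dev->dom = dom;		/* the driver also programs the DTE here */
}

static struct domain *get_domain(struct device *dev)
{
	struct domain *dom = dev->dom;

	if (dom)
		return dom;	/* already bound */

	dom = find_prealloc(dev->devid);
	if (!dom)
		dom = &default_dom;	/* the IOMMU's default dma_ops domain */
	attach(dev, dom);
	return dom;
}

int main(void)
{
	struct device dev = { .dom = NULL, .devid = 0x10 };

	printf("bound to domain %d\n", get_domain(&dev)->id);
	printf("second call reuses domain %d\n", get_domain(&dev)->id);
	return 0;
}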
- */ -static struct protection_domain *get_domain(struct device *dev) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - u16 devid = get_device_id(dev); - - if (!check_device(dev)) - return ERR_PTR(-EINVAL); - - domain = domain_for_device(dev); - if (domain != NULL && !dma_ops_domain(domain)) - return ERR_PTR(-EBUSY); - - if (domain != NULL) - return domain; - - /* Device not bount yet - bind it */ - dma_dom = find_protection_domain(devid); - if (!dma_dom) - dma_dom = amd_iommu_rlookup_table[devid]->default_dom; - attach_device(dev, &dma_dom->domain); - DUMP_printk("Using protection domain %d for device %s\n", - dma_dom->domain.id, dev_name(dev)); - - return &dma_dom->domain; -} - -static void update_device_table(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - u16 devid = get_device_id(dev_data->dev); - set_dte_entry(devid, domain, pci_ats_enabled(pdev)); - } -} - -static void update_domain(struct protection_domain *domain) -{ - if (!domain->updated) - return; - - update_device_table(domain); - - domain_flush_devices(domain); - domain_flush_tlb_pde(domain); - - domain->updated = false; -} - -/* - * This function fetches the PTE for a given address in the aperture - */ -static u64* dma_ops_get_pte(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte, *pte_page; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return NULL; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) { - pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, - GFP_ATOMIC); - aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; - } else - pte += PM_LEVEL_INDEX(0, address); - - update_domain(&dom->domain); - - return pte; -} - -/* - * This is the generic map function. It maps one 4kb page at paddr to - * the given address in the DMA address space for the domain. - */ -static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, - unsigned long address, - phys_addr_t paddr, - int direction) -{ - u64 *pte, __pte; - - WARN_ON(address > dom->aperture_size); - - paddr &= PAGE_MASK; - - pte = dma_ops_get_pte(dom, address); - if (!pte) - return DMA_ERROR_CODE; - - __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; - - if (direction == DMA_TO_DEVICE) - __pte |= IOMMU_PTE_IR; - else if (direction == DMA_FROM_DEVICE) - __pte |= IOMMU_PTE_IW; - else if (direction == DMA_BIDIRECTIONAL) - __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; - - WARN_ON(*pte); - - *pte = __pte; - - return (dma_addr_t)address; -} - -/* - * The generic unmapping function for on page in the DMA address space. - */ -static void dma_ops_domain_unmap(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte; - - if (address >= dom->aperture_size) - return; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) - return; - - pte += PM_LEVEL_INDEX(0, address); - - WARN_ON(!*pte); - - *pte = 0ULL; -} - -/* - * This function contains common code for mapping of a physically - * contiguous memory region into DMA address space. It is used by all - * mapping functions provided with this IOMMU driver. - * Must be called with the domain lock held. 
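Mapping a physically contiguous buffer means preserving the sub-page offset, mapping ceil((offset + size) / 4K) pages, and unwinding every page already mapped if one fails, which is the shape of the common mapping path described here. A sketch of that arithmetic and rollback; map_one and unmap_one are hypothetical per-page hooks:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int map_one(unsigned long dma, unsigned long phys)
{
	(void)dma; (void)phys;
	return 0;
}
static void unmap_one(unsigned long dma) { (void)dma; }

/* Returns the DMA address for 'paddr', or 0 on failure. */
static unsigned long map_single(unsigned long base_dma,
				unsigned long paddr, unsigned long size)
{
	unsigned long offset = paddr & ~PAGE_MASK;
	/* Same count as iommu_num_pages(): round the span up to whole pages. */
	unsigned long pages = (offset + size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long i;

	paddr &= PAGE_MASK;
	for (i = 0; i < pages; i++) {
		if (map_one(base_dma + i * PAGE_SIZE, paddr + i * PAGE_SIZE)) {
			while (i--)	/* roll back what we mapped */
				unmap_one(base_dma + i * PAGE_SIZE);
			return 0;
		}
	}
	return base_dma + offset;	/* caller's address keeps the offset */
}

int main(void)
{
	printf("dma addr: %#lx\n", map_single(0x100000, 0x2345, 0x2000));
	return 0;
}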
- */ -static dma_addr_t __map_single(struct device *dev, - struct dma_ops_domain *dma_dom, - phys_addr_t paddr, - size_t size, - int dir, - bool align, - u64 dma_mask) -{ - dma_addr_t offset = paddr & ~PAGE_MASK; - dma_addr_t address, start, ret; - unsigned int pages; - unsigned long align_mask = 0; - int i; - - pages = iommu_num_pages(paddr, size, PAGE_SIZE); - paddr &= PAGE_MASK; - - INC_STATS_COUNTER(total_map_requests); - - if (pages > 1) - INC_STATS_COUNTER(cross_page); - - if (align) - align_mask = (1UL << get_order(size)) - 1; - -retry: - address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, - dma_mask); - if (unlikely(address == DMA_ERROR_CODE)) { - /* - * setting next_address here will let the address - * allocator only scan the new allocated range in the - * first run. This is a small optimization. - */ - dma_dom->next_address = dma_dom->aperture_size; - - if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) - goto out; - - /* - * aperture was successfully enlarged by 128 MB, try - * allocation again - */ - goto retry; - } - - start = address; - for (i = 0; i < pages; ++i) { - ret = dma_ops_domain_map(dma_dom, start, paddr, dir); - if (ret == DMA_ERROR_CODE) - goto out_unmap; - - paddr += PAGE_SIZE; - start += PAGE_SIZE; - } - address += offset; - - ADD_STATS_COUNTER(alloced_io_mem, size); - - if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - domain_flush_tlb(&dma_dom->domain); - dma_dom->need_flush = false; - } else if (unlikely(amd_iommu_np_cache)) - domain_flush_pages(&dma_dom->domain, address, size); - -out: - return address; - -out_unmap: - - for (--i; i >= 0; --i) { - start -= PAGE_SIZE; - dma_ops_domain_unmap(dma_dom, start); - } - - dma_ops_free_addresses(dma_dom, address, pages); - - return DMA_ERROR_CODE; -} - -/* - * Does the reverse of the __map_single function. Must be called with - * the domain lock held too - */ -static void __unmap_single(struct dma_ops_domain *dma_dom, - dma_addr_t dma_addr, - size_t size, - int dir) -{ - dma_addr_t flush_addr; - dma_addr_t i, start; - unsigned int pages; - - if ((dma_addr == DMA_ERROR_CODE) || - (dma_addr + size > dma_dom->aperture_size)) - return; - - flush_addr = dma_addr; - pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); - dma_addr &= PAGE_MASK; - start = dma_addr; - - for (i = 0; i < pages; ++i) { - dma_ops_domain_unmap(dma_dom, start); - start += PAGE_SIZE; - } - - SUB_STATS_COUNTER(alloced_io_mem, size); - - dma_ops_free_addresses(dma_dom, dma_addr, pages); - - if (amd_iommu_unmap_flush || dma_dom->need_flush) { - domain_flush_pages(&dma_dom->domain, flush_addr, size); - dma_dom->need_flush = false; - } -} - -/* - * The exported map_single function for dma_ops. 
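Earlier, dma_ops_domain_map turned the DMA direction into device page permissions: device-readable for DMA_TO_DEVICE, device-writable for DMA_FROM_DEVICE, and both for bidirectional transfers. A tiny sketch of that translation; the flag values are illustrative, not the hardware PTE encoding:

#include <stdio.h>

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

#define PTE_IR 0x1	/* device may read (CPU -> device transfers) */
#define PTE_IW 0x2	/* device may write (device -> CPU transfers) */

static unsigned long dir_to_prot(enum dma_dir dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return PTE_IR;
	case DMA_FROM_DEVICE:	return PTE_IW;
	case DMA_BIDIRECTIONAL:	return PTE_IR | PTE_IW;
	}
	return 0;
}

int main(void)
{
	printf("prot for DMA_FROM_DEVICE: %#lx\n", dir_to_prot(DMA_FROM_DEVICE));
	return 0;
}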
- */ -static dma_addr_t map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - dma_addr_t addr; - u64 dma_mask; - phys_addr_t paddr = page_to_phys(page) + offset; - - INC_STATS_COUNTER(cnt_map_single); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return (dma_addr_t)paddr; - else if (IS_ERR(domain)) - return DMA_ERROR_CODE; - - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - addr = __map_single(dev, domain->priv, paddr, size, dir, false, - dma_mask); - if (addr == DMA_ERROR_CODE) - goto out; - - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - - return addr; -} - -/* - * The exported unmap_single function for dma_ops. - */ -static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - - INC_STATS_COUNTER(cnt_unmap_single); - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - spin_lock_irqsave(&domain->lock, flags); - - __unmap_single(domain->priv, dma_addr, size, dir); - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/* - * This is a special map_sg function which is used if we should map a - * device which is not handled by an AMD IOMMU in the system. - */ -static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, - int nelems, int dir) -{ - struct scatterlist *s; - int i; - - for_each_sg(sglist, s, nelems, i) { - s->dma_address = (dma_addr_t)sg_phys(s); - s->dma_length = s->length; - } - - return nelems; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). - */ -static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - int i; - struct scatterlist *s; - phys_addr_t paddr; - int mapped_elems = 0; - u64 dma_mask; - - INC_STATS_COUNTER(cnt_map_sg); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return map_sg_no_iommu(dev, sglist, nelems, dir); - else if (IS_ERR(domain)) - return 0; - - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - for_each_sg(sglist, s, nelems, i) { - paddr = sg_phys(s); - - s->dma_address = __map_single(dev, domain->priv, - paddr, s->length, dir, false, - dma_mask); - - if (s->dma_address) { - s->dma_length = s->length; - mapped_elems++; - } else - goto unmap; - } - - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - - return mapped_elems; -unmap: - for_each_sg(sglist, s, mapped_elems, i) { - if (s->dma_address) - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; - } - - mapped_elems = 0; - - goto out; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). 
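map_sg above applies __map_single per scatterlist entry and, on failure, walks back over only the elements already mapped, zeroing their addresses before returning 0. A condensed sketch of that partial-failure unwind over a plain array; map_elem and unmap_elem are hypothetical:

#include <stdio.h>

struct sg { unsigned long phys, dma; };

static unsigned long map_elem(unsigned long phys)
{
	return phys == 0 ? 0 : phys | 0x80000000UL;	/* fail on a 0 address */
}
static void unmap_elem(unsigned long dma) { (void)dma; }

/* Returns the number of mapped elements, 0 if anything failed. */
static int map_sg(struct sg *sgl, int nelems)
{
	int i, mapped = 0;

	for (i = 0; i < nelems; i++) {
		sgl[i].dma = map_elem(sgl[i].phys);
		if (!sgl[i].dma)
			goto unmap;
		mapped++;
	}
	return mapped;

unmap:
	for (i = 0; i < mapped; i++) {	/* undo only what succeeded */
		unmap_elem(sgl[i].dma);
		sgl[i].dma = 0;
	}
	return 0;
}

int main(void)
{
	struct sg sgl[3] = { { 0x1000, 0 }, { 0x2000, 0 }, { 0, 0 } };

	printf("mapped %d of 3\n", map_sg(sgl, 3));
	return 0;
}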
- */ -static void unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - struct scatterlist *s; - int i; - - INC_STATS_COUNTER(cnt_unmap_sg); - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - spin_lock_irqsave(&domain->lock, flags); - - for_each_sg(sglist, s, nelems, i) { - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; - } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/* - * The exported alloc_coherent function for dma_ops. - */ -static void *alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag) -{ - unsigned long flags; - void *virt_addr; - struct protection_domain *domain; - phys_addr_t paddr; - u64 dma_mask = dev->coherent_dma_mask; - - INC_STATS_COUNTER(cnt_alloc_coherent); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) { - virt_addr = (void *)__get_free_pages(flag, get_order(size)); - *dma_addr = __pa(virt_addr); - return virt_addr; - } else if (IS_ERR(domain)) - return NULL; - - dma_mask = dev->coherent_dma_mask; - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - flag |= __GFP_ZERO; - - virt_addr = (void *)__get_free_pages(flag, get_order(size)); - if (!virt_addr) - return NULL; - - paddr = virt_to_phys(virt_addr); - - if (!dma_mask) - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - *dma_addr = __map_single(dev, domain->priv, paddr, - size, DMA_BIDIRECTIONAL, true, dma_mask); - - if (*dma_addr == DMA_ERROR_CODE) { - spin_unlock_irqrestore(&domain->lock, flags); - goto out_free; - } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); - - return virt_addr; - -out_free: - - free_pages((unsigned long)virt_addr, get_order(size)); - - return NULL; -} - -/* - * The exported free_coherent function for dma_ops. - */ -static void free_coherent(struct device *dev, size_t size, - void *virt_addr, dma_addr_t dma_addr) -{ - unsigned long flags; - struct protection_domain *domain; - - INC_STATS_COUNTER(cnt_free_coherent); - - domain = get_domain(dev); - if (IS_ERR(domain)) - goto free_mem; - - spin_lock_irqsave(&domain->lock, flags); - - __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); - -free_mem: - free_pages((unsigned long)virt_addr, get_order(size)); -} - -/* - * This function is called by the DMA layer to find out if we can handle a - * particular device. It is part of the dma_ops. - */ -static int amd_iommu_dma_supported(struct device *dev, u64 mask) -{ - return check_device(dev); -} - -/* - * The function for pre-allocating protection domains. - * - * If the driver core informs the DMA layer if a driver grabs a device - * we don't need to preallocate the protection domains anymore. - * For now we have to. - */ -static void prealloc_protection_domains(void) -{ - struct pci_dev *dev = NULL; - struct dma_ops_domain *dma_dom; - u16 devid; - - for_each_pci_dev(dev) { - - /* Do we handle this device? */ - if (!check_device(&dev->dev)) - continue; - - /* Is there already any domain for it? 
*/ - if (domain_for_device(&dev->dev)) - continue; - - devid = get_device_id(&dev->dev); - - dma_dom = dma_ops_domain_alloc(); - if (!dma_dom) - continue; - init_unity_mappings_for_device(dma_dom, devid); - dma_dom->target_dev = devid; - - attach_device(&dev->dev, &dma_dom->domain); - - list_add_tail(&dma_dom->list, &iommu_pd_list); - } -} - -static struct dma_map_ops amd_iommu_dma_ops = { - .alloc_coherent = alloc_coherent, - .free_coherent = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, -}; - -static unsigned device_dma_ops_init(void) -{ - struct pci_dev *pdev = NULL; - unsigned unhandled = 0; - - for_each_pci_dev(pdev) { - if (!check_device(&pdev->dev)) { - unhandled += 1; - continue; - } - - pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops; - } - - return unhandled; -} - -/* - * The function which hooks the AMD IOMMU driver into dma_ops. - */ - -void __init amd_iommu_init_api(void) -{ - register_iommu(&amd_iommu_ops); -} - -int __init amd_iommu_init_dma_ops(void) -{ - struct amd_iommu *iommu; - int ret, unhandled; - - /* - * First, allocate a default protection domain for every IOMMU we - * found in the system. Devices not assigned to any other - * protection domain will be assigned to the default one. - */ - for_each_iommu(iommu) { - iommu->default_dom = dma_ops_domain_alloc(); - if (iommu->default_dom == NULL) - return -ENOMEM; - iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; - ret = iommu_init_unity_mappings(iommu); - if (ret) - goto free_domains; - } - - /* - * Pre-allocate the protection domains for each device. - */ - prealloc_protection_domains(); - - iommu_detected = 1; - swiotlb = 0; - - /* Make the driver finally visible to the drivers */ - unhandled = device_dma_ops_init(); - if (unhandled && max_pfn > MAX_DMA32_PFN) { - /* There are unhandled devices - initialize swiotlb for them */ - swiotlb = 1; - } - - amd_iommu_stats_init(); - - return 0; - -free_domains: - - for_each_iommu(iommu) { - if (iommu->default_dom) - dma_ops_domain_free(iommu->default_dom); - } - - return ret; -} - -/***************************************************************************** - * - * The following functions belong to the exported interface of AMD IOMMU - * - * This interface allows access to lower level functions of the IOMMU - * like protection domain handling and assignment of devices to domains - * which is not possible with the dma_ops interface. 
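
[Editor's sketch — not part of the patch] The fallback decision amd_iommu_init_dma_ops() makes above: swiotlb stays disabled unless some device is not covered by any IOMMU *and* the machine has memory above the 32-bit boundary, in which case those devices need bounce buffering. Constants and names below are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_MAX_DMA32_PFN	(1UL << (32 - 12))	/* 4 GiB in 4K pages */

static bool sketch_need_swiotlb(unsigned int unhandled, unsigned long max_pfn)
{
	/* unhandled devices must still be able to reach all of memory */
	return unhandled != 0 && max_pfn > SKETCH_MAX_DMA32_PFN;
}

int main(void)
{
	/* 2 unhandled devices on an 8 GiB box -> bounce buffers needed */
	printf("swiotlb=%d\n", sketch_need_swiotlb(2, 2UL << 20));
	return 0;
}
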
- * - *****************************************************************************/ - -static void cleanup_domain(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data, *next; - unsigned long flags; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - struct device *dev = dev_data->dev; - - __detach_device(dev); - atomic_set(&dev_data->bind, 0); - } - - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); -} - -static void protection_domain_free(struct protection_domain *domain) -{ - if (!domain) - return; - - del_domain_from_list(domain); - - if (domain->id) - domain_id_free(domain->id); - - kfree(domain); -} - -static struct protection_domain *protection_domain_alloc(void) -{ - struct protection_domain *domain; - - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - - spin_lock_init(&domain->lock); - mutex_init(&domain->api_lock); - domain->id = domain_id_alloc(); - if (!domain->id) - goto out_err; - INIT_LIST_HEAD(&domain->dev_list); - - add_domain_to_list(domain); - - return domain; - -out_err: - kfree(domain); - - return NULL; -} - -static int amd_iommu_domain_init(struct iommu_domain *dom) -{ - struct protection_domain *domain; - - domain = protection_domain_alloc(); - if (!domain) - goto out_free; - - domain->mode = PAGE_MODE_3_LEVEL; - domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - if (!domain->pt_root) - goto out_free; - - dom->priv = domain; - - return 0; - -out_free: - protection_domain_free(domain); - - return -ENOMEM; -} - -static void amd_iommu_domain_destroy(struct iommu_domain *dom) -{ - struct protection_domain *domain = dom->priv; - - if (!domain) - return; - - if (domain->dev_cnt > 0) - cleanup_domain(domain); - - BUG_ON(domain->dev_cnt != 0); - - free_pagetable(domain); - - protection_domain_free(domain); - - dom->priv = NULL; -} - -static void amd_iommu_detach_device(struct iommu_domain *dom, - struct device *dev) -{ - struct iommu_dev_data *dev_data = dev->archdata.iommu; - struct amd_iommu *iommu; - u16 devid; - - if (!check_device(dev)) - return; - - devid = get_device_id(dev); - - if (dev_data->domain != NULL) - detach_device(dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - return; - - device_flush_dte(dev); - iommu_completion_wait(iommu); -} - -static int amd_iommu_attach_device(struct iommu_domain *dom, - struct device *dev) -{ - struct protection_domain *domain = dom->priv; - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - int ret; - u16 devid; - - if (!check_device(dev)) - return -EINVAL; - - dev_data = dev->archdata.iommu; - - devid = get_device_id(dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - return -EINVAL; - - if (dev_data->domain) - detach_device(dev); - - ret = attach_device(dev, domain); - - iommu_completion_wait(iommu); - - return ret; -} - -static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, - phys_addr_t paddr, int gfp_order, int iommu_prot) -{ - unsigned long page_size = 0x1000UL << gfp_order; - struct protection_domain *domain = dom->priv; - int prot = 0; - int ret; - - if (iommu_prot & IOMMU_READ) - prot |= IOMMU_PROT_IR; - if (iommu_prot & IOMMU_WRITE) - prot |= IOMMU_PROT_IW; - - mutex_lock(&domain->api_lock); - ret = iommu_map_page(domain, iova, paddr, prot, page_size); - mutex_unlock(&domain->api_lock); - - return ret; -} - -static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, - int gfp_order) -{ - struct 
protection_domain *domain = dom->priv; - unsigned long page_size, unmap_size; - - page_size = 0x1000UL << gfp_order; - - mutex_lock(&domain->api_lock); - unmap_size = iommu_unmap_page(domain, iova, page_size); - mutex_unlock(&domain->api_lock); - - domain_flush_tlb_pde(domain); - - return get_order(unmap_size); -} - -static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, - unsigned long iova) -{ - struct protection_domain *domain = dom->priv; - unsigned long offset_mask; - phys_addr_t paddr; - u64 *pte, __pte; - - pte = fetch_pte(domain, iova); - - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - return 0; - - if (PM_PTE_LEVEL(*pte) == 0) - offset_mask = PAGE_SIZE - 1; - else - offset_mask = PTE_PAGE_SIZE(*pte) - 1; - - __pte = *pte & PM_ADDR_MASK; - paddr = (__pte & ~offset_mask) | (iova & offset_mask); - - return paddr; -} - -static int amd_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - switch (cap) { - case IOMMU_CAP_CACHE_COHERENCY: - return 1; - } - - return 0; -} - -static struct iommu_ops amd_iommu_ops = { - .domain_init = amd_iommu_domain_init, - .domain_destroy = amd_iommu_domain_destroy, - .attach_dev = amd_iommu_attach_device, - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .unmap = amd_iommu_unmap, - .iova_to_phys = amd_iommu_iova_to_phys, - .domain_has_cap = amd_iommu_domain_has_cap, -}; - -/***************************************************************************** - * - * The next functions do a basic initialization of IOMMU for pass through - * mode - * - * In passthrough mode the IOMMU is initialized and enabled but not used for - * DMA-API translation. - * - *****************************************************************************/ - -int __init amd_iommu_init_passthrough(void) -{ - struct amd_iommu *iommu; - struct pci_dev *dev = NULL; - u16 devid; - - /* allocate passthrough domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode |= PAGE_MODE_NONE; - - for_each_pci_dev(dev) { - if (!check_device(&dev->dev)) - continue; - - devid = get_device_id(&dev->dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - continue; - - attach_device(&dev->dev, pt_domain); - } - - pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); - - return 0; -} diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c deleted file mode 100644 index bfc8453bd98..00000000000 --- a/arch/x86/kernel/amd_iommu_init.c +++ /dev/null @@ -1,1572 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/pci.h> -#include <linux/acpi.h> -#include <linux/list.h> -#include <linux/slab.h> -#include <linux/syscore_ops.h> -#include <linux/interrupt.h> -#include <linux/msi.h> -#include <asm/pci-direct.h> -#include <asm/amd_iommu_proto.h> -#include <asm/amd_iommu_types.h> -#include <asm/amd_iommu.h> -#include <asm/iommu.h> -#include <asm/gart.h> -#include <asm/x86_init.h> -#include <asm/iommu_table.h> -/* - * definitions for the ACPI scanning code - */ -#define IVRS_HEADER_LENGTH 48 - -#define ACPI_IVHD_TYPE 0x10 -#define ACPI_IVMD_TYPE_ALL 0x20 -#define ACPI_IVMD_TYPE 0x21 -#define ACPI_IVMD_TYPE_RANGE 0x22 - -#define IVHD_DEV_ALL 0x01 -#define IVHD_DEV_SELECT 0x02 -#define IVHD_DEV_SELECT_RANGE_START 0x03 -#define IVHD_DEV_RANGE_END 0x04 -#define IVHD_DEV_ALIAS 0x42 -#define IVHD_DEV_ALIAS_RANGE 0x43 -#define IVHD_DEV_EXT_SELECT 0x46 -#define IVHD_DEV_EXT_SELECT_RANGE 0x47 - -#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 -#define IVHD_FLAG_PASSPW_EN_MASK 0x02 -#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 -#define IVHD_FLAG_ISOC_EN_MASK 0x08 - -#define IVMD_FLAG_EXCL_RANGE 0x08 -#define IVMD_FLAG_UNITY_MAP 0x01 - -#define ACPI_DEVFLAG_INITPASS 0x01 -#define ACPI_DEVFLAG_EXTINT 0x02 -#define ACPI_DEVFLAG_NMI 0x04 -#define ACPI_DEVFLAG_SYSMGT1 0x10 -#define ACPI_DEVFLAG_SYSMGT2 0x20 -#define ACPI_DEVFLAG_LINT0 0x40 -#define ACPI_DEVFLAG_LINT1 0x80 -#define ACPI_DEVFLAG_ATSDIS 0x10000000 - -/* - * ACPI table definitions - * - * These data structures are laid over the table to parse the important values - * out of it. - */ - -/* - * structure describing one IOMMU in the ACPI table. Typically followed by one - * or more ivhd_entrys. - */ -struct ivhd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 cap_ptr; - u64 mmio_phys; - u16 pci_seg; - u16 info; - u32 reserved; -} __attribute__((packed)); - -/* - * A device entry describing which devices a specific IOMMU translates and - * which requestor ids they use. - */ -struct ivhd_entry { - u8 type; - u16 devid; - u8 flags; - u32 ext; -} __attribute__((packed)); - -/* - * An AMD IOMMU memory definition structure. It defines things like exclusion - * ranges for devices and regions that should be unity mapped. - */ -struct ivmd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 aux; - u64 resv; - u64 range_start; - u64 range_length; -} __attribute__((packed)); - -bool amd_iommu_dump; - -static int __initdata amd_iommu_detected; -static bool __initdata amd_iommu_disabled; - -u16 amd_iommu_last_bdf; /* largest PCI device id we have - to handle */ -LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings - we find in ACPI */ -bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ - -LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the - system */ - -/* Array to assign indices to IOMMUs*/ -struct amd_iommu *amd_iommus[MAX_IOMMUS]; -int amd_iommus_present; - -/* IOMMUs have a non-present cache? 
*/ -bool amd_iommu_np_cache __read_mostly; -bool amd_iommu_iotlb_sup __read_mostly = true; - -/* - * The ACPI table parsing functions set this variable on an error - */ -static int __initdata amd_iommu_init_err; - -/* - * List of protection domains - used during resume - */ -LIST_HEAD(amd_iommu_pd_list); -spinlock_t amd_iommu_pd_lock; - -/* - * Pointer to the device table which is shared by all AMD IOMMUs. - * It is indexed by the PCI device id or the HT unit id and contains - * information about the domain the device belongs to as well as the - * page table root pointer. - */ -struct dev_table_entry *amd_iommu_dev_table; - -/* - * The alias table is a driver specific data structure which contains the - * mappings of the PCI device ids to the actual requestor ids on the IOMMU. - * More than one device can share the same requestor id. - */ -u16 *amd_iommu_alias_table; - -/* - * The rlookup table is used to find the IOMMU which is responsible - * for a specific device. It is also indexed by the PCI device id. - */ -struct amd_iommu **amd_iommu_rlookup_table; - -/* - * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap - * to know which ones are already in use. - */ -unsigned long *amd_iommu_pd_alloc_bitmap; - -static u32 dev_table_size; /* size of the device table */ -static u32 alias_table_size; /* size of the alias table */ -static u32 rlookup_table_size; /* size of the rlookup table */ - -/* - * This function flushes all internal caches of - * the IOMMU used by this driver. - */ -extern void iommu_flush_all_caches(struct amd_iommu *iommu); - -static inline void update_last_devid(u16 devid) -{ - if (devid > amd_iommu_last_bdf) - amd_iommu_last_bdf = devid; -} - -static inline unsigned long tbl_size(int entry_size) -{ - unsigned shift = PAGE_SHIFT + - get_order(((int)amd_iommu_last_bdf + 1) * entry_size); - - return 1UL << shift; -} - -/* Access to l1 and l2 indexed register spaces */ - -static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); - pci_read_config_dword(iommu->dev, 0xfc, &val); - return val; -} - -static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); - pci_write_config_dword(iommu->dev, 0xfc, val); - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); -} - -static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf0, address); - pci_read_config_dword(iommu->dev, 0xf4, &val); - return val; -} - -static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); - pci_write_config_dword(iommu->dev, 0xf4, val); -} - -/**************************************************************************** - * - * AMD IOMMU MMIO register space handling functions - * - * These functions are used to program the IOMMU device registers in - * MMIO space required by the driver. - * - ****************************************************************************/ - -/* - * This function sets the exclusion range in the IOMMU.
DMA accesses to the - * exclusion range are passed through untranslated - */ -static void iommu_set_exclusion_range(struct amd_iommu *iommu) -{ - u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; - u64 entry; - - if (!iommu->exclusion_start) - return; - - entry = start | MMIO_EXCL_ENABLE_MASK; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, - &entry, sizeof(entry)); - - entry = limit; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, - &entry, sizeof(entry)); -} - -/* Programs the physical address of the device table into the IOMMU hardware */ -static void __init iommu_set_device_table(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->mmio_base == NULL); - - entry = virt_to_phys(amd_iommu_dev_table); - entry |= (dev_table_size >> 12) - 1; - memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, - &entry, sizeof(entry)); -} - -/* Generic functions to enable/disable certain features of the IOMMU. */ -static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl |= (1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~(1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -/* Function to enable the hardware */ -static void iommu_enable(struct amd_iommu *iommu) -{ - static const char * const feat_str[] = { - "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", - "IA", "GA", "HE", "PC", NULL - }; - int i; - - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", - dev_name(&iommu->dev->dev), iommu->cap_ptr); - - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - printk(KERN_CONT " extended features: "); - for (i = 0; feat_str[i]; ++i) - if (iommu_feature(iommu, (1ULL << i))) - printk(KERN_CONT " %s", feat_str[i]); - } - printk(KERN_CONT "\n"); - - iommu_feature_enable(iommu, CONTROL_IOMMU_EN); -} - -static void iommu_disable(struct amd_iommu *iommu) -{ - /* Disable command buffer */ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - /* Disable event logging and event interrupts */ - iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); - iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); - - /* Disable IOMMU hardware itself */ - iommu_feature_disable(iommu, CONTROL_IOMMU_EN); -} - -/* - * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in - * the system has one. - */ -static u8 * __init iommu_map_mmio_space(u64 address) -{ - u8 *ret; - - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { - pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", - address); - pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); - return NULL; - } - - ret = ioremap_nocache(address, MMIO_REGION_LENGTH); - if (ret != NULL) - return ret; - - release_mem_region(address, MMIO_REGION_LENGTH); - - return NULL; -} - -static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) -{ - if (iommu->mmio_base) - iounmap(iommu->mmio_base); - release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); -} - -/**************************************************************************** - * - * The functions below belong to the first pass of AMD IOMMU ACPI table - * parsing. In this pass we try to find out the highest device id this - * code has to handle. 
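
[Editor's sketch — not part of the patch] What the first pass buys: once the highest device id is known, each per-device table can be sized and rounded up to a power-of-two number of pages, as tbl_size() above does. Reimplemented below in plain C with illustrative entry sizes (32-byte device table entries, 2-byte alias entries, pointer-sized rlookup entries).

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12

static int sketch_get_order(unsigned long size)
{
	int order = 0;

	/* smallest order such that 2^order pages cover size bytes */
	size = (size - 1) >> SKETCH_PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

static unsigned long sketch_tbl_size(unsigned int last_bdf, int entry_size)
{
	return 1UL << (SKETCH_PAGE_SHIFT +
		       sketch_get_order((last_bdf + 1UL) * entry_size));
}

int main(void)
{
	unsigned int last_bdf = 0xffff;	/* full 16-bit requestor-id space */

	printf("dev table:     %lu bytes\n", sketch_tbl_size(last_bdf, 32));
	printf("alias table:   %lu bytes\n", sketch_tbl_size(last_bdf, 2));
	printf("rlookup table: %lu bytes\n", sketch_tbl_size(last_bdf, 8));
	return 0;
}
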
Based on this information, the size of the shared - * data structures is determined later. - * - ****************************************************************************/ - -/* - * This function calculates the length of a given IVHD entry - */ -static inline int ivhd_entry_length(u8 *ivhd) -{ - return 0x04 << (*ivhd >> 6); -} - -/* - * This function reads the last device id the IOMMU has to handle from the PCI - * capability header for this IOMMU - */ -static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) -{ - u32 cap; - - cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); - update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); - - return 0; -} - -/* - * After reading the highest device id from the IOMMU PCI capability header - * this function checks whether a higher device id is defined in the ACPI table - */ -static int __init find_last_devid_from_ivhd(struct ivhd_header *h) -{ - u8 *p = (void *)h, *end = (void *)h; - struct ivhd_entry *dev; - - p += sizeof(*h); - end += h->length; - - find_last_devid_on_pci(PCI_BUS(h->devid), - PCI_SLOT(h->devid), - PCI_FUNC(h->devid), - h->cap_ptr); - - while (p < end) { - dev = (struct ivhd_entry *)p; - switch (dev->type) { - case IVHD_DEV_SELECT: - case IVHD_DEV_RANGE_END: - case IVHD_DEV_ALIAS: - case IVHD_DEV_EXT_SELECT: - /* all the above subfield types refer to device ids */ - update_last_devid(dev->devid); - break; - default: - break; - } - p += ivhd_entry_length(p); - } - - WARN_ON(p != end); - - return 0; -} - -/* - * Iterate over all IVHD entries in the ACPI table and find the highest device - * id which we need to handle. This is the first of three functions which parse - * the ACPI table. So we check the checksum here. - */ -static int __init find_last_devid_acpi(struct acpi_table_header *table) -{ - int i; - u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; - struct ivhd_header *h; - - /* - * Validate checksum here so we don't need to do it when - * we actually parse the table - */ - for (i = 0; i < table->length; ++i) - checksum += p[i]; - if (checksum != 0) { - /* ACPI table corrupt */ - amd_iommu_init_err = -ENODEV; - return 0; - } - - p += IVRS_HEADER_LENGTH; - - end += table->length; - while (p < end) { - h = (struct ivhd_header *)p; - switch (h->type) { - case ACPI_IVHD_TYPE: - find_last_devid_from_ivhd(h); - break; - default: - break; - } - p += h->length; - } - WARN_ON(p != end); - - return 0; -} - -/**************************************************************************** - * - * The following functions belong to the code path which parses the ACPI table - * the second time. In this ACPI parsing iteration we allocate IOMMU specific - * data structures, initialize the device/alias/rlookup table and also - * basically initialize the hardware. - * - ****************************************************************************/ - -/* - * Allocates the command buffer. This buffer is per AMD IOMMU. We can - * write commands to that buffer later and the IOMMU will execute them - * asynchronously. - */ -static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) -{ - u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(CMD_BUFFER_SIZE)); - - if (cmd_buf == NULL) - return NULL; - - iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; - - return cmd_buf; -} - -/* - * This function resets the command buffer if the IOMMU stopped fetching - * commands from it.
- */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) -{ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); -} - -/* - * This function writes the command buffer address to the hardware and - * enables it. - */ -static void iommu_enable_command_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->cmd_buf == NULL); - - entry = (u64)virt_to_phys(iommu->cmd_buf); - entry |= MMIO_CMD_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, - &entry, sizeof(entry)); - - amd_iommu_reset_cmd_buffer(iommu); - iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); -} - -static void __init free_command_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->cmd_buf, - get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); -} - -/* allocates the memory where the IOMMU will log its events to */ -static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) -{ - iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(EVT_BUFFER_SIZE)); - - if (iommu->evt_buf == NULL) - return NULL; - - iommu->evt_buf_size = EVT_BUFFER_SIZE; - - return iommu->evt_buf; -} - -static void iommu_enable_event_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->evt_buf == NULL); - - entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; - - memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, - &entry, sizeof(entry)); - - /* set head and tail to zero manually */ - writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); -} - -static void __init free_event_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); -} - -/* sets a specific bit in the device table entry. 
*/ -static void set_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - amd_iommu_dev_table[devid].data[i] |= (1 << _bit); -} - -static int get_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; -} - - -void amd_iommu_apply_erratum_63(u16 devid) -{ - int sysmgt; - - sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | - (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); - - if (sysmgt == 0x01) - set_dev_entry_bit(devid, DEV_ENTRY_IW); -} - -/* Writes the specific IOMMU for a device into the rlookup table */ -static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) -{ - amd_iommu_rlookup_table[devid] = iommu; -} - -/* - * This function takes the device specific flags read from the ACPI - * table and sets up the device table entry with that information - */ -static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, - u16 devid, u32 flags, u32 ext_flags) -{ - if (flags & ACPI_DEVFLAG_INITPASS) - set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); - if (flags & ACPI_DEVFLAG_EXTINT) - set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); - if (flags & ACPI_DEVFLAG_NMI) - set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); - if (flags & ACPI_DEVFLAG_SYSMGT1) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); - if (flags & ACPI_DEVFLAG_SYSMGT2) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); - if (flags & ACPI_DEVFLAG_LINT0) - set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); - if (flags & ACPI_DEVFLAG_LINT1) - set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); - - amd_iommu_apply_erratum_63(devid); - - set_iommu_for_device(iommu, devid); -} - -/* - * Reads the device exclusion range from ACPI and initializes the - * IOMMU with it - */ -static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) -{ - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - - if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) - return; - - if (iommu) { - /* - * We can only configure exclusion ranges per IOMMU, not - * per device. But we can enable the exclusion range per - * device. This is done here. - */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); - iommu->exclusion_start = m->range_start; - iommu->exclusion_length = m->range_length; - } -} - -/* - * This function reads some important data from the IOMMU PCI space and - * initializes the driver data structure with it.
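
[Editor's sketch — not part of the patch] The bit addressing used by set_dev_entry_bit()/get_dev_entry_bit() above: a flat bit number is split into a 32-bit word index (bit >> 5) and a bit position within that word (bit & 0x1f). A worked standalone example; the bit value 62 is only illustrative.

#include <stdio.h>

static void sketch_set_bit(unsigned int *data, unsigned char bit)
{
	int i = (bit >> 5) & 0x07;	/* which of the 8 words */
	int _bit = bit & 0x1f;		/* which bit inside that word */

	data[i] |= 1u << _bit;
}

int main(void)
{
	unsigned int entry[8] = { 0 };

	sketch_set_bit(entry, 62);	/* 62 = word 1, bit 30 */
	printf("data[1] = 0x%08x\n", entry[1]);	/* prints 0x40000000 */
	return 0;
}
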
It reads the hardware - * capabilities and the first/last device entries - */ -static void __init init_iommu_from_pci(struct amd_iommu *iommu) -{ - int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; - int i, j; - - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, - &iommu->cap); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, - &range); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, - &misc); - - iommu->first_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_FD(range)); - iommu->last_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_LD(range)); - iommu->evt_msi_num = MMIO_MSI_NUM(misc); - - if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) - amd_iommu_iotlb_sup = false; - - /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - - iommu->features = ((u64)high << 32) | low; - - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * Some rd890 systems may not be fully reconfigured by the BIOS, so - * it's necessary for us to store this information so it can be - * reprogrammed on resume - */ - - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, - &iommu->stored_addr_lo); - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, - &iommu->stored_addr_hi); - - /* Low bit locks writes to configuration space */ - iommu->stored_addr_lo &= ~1; - - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); - - for (i = 0; i < 0x83; i++) - iommu->stored_l2[i] = iommu_read_l2(iommu, i); -} - -/* - * Takes a pointer to an AMD IOMMU entry in the ACPI table and - * initializes the hardware and our data structures with it. - */ -static void __init init_iommu_from_acpi(struct amd_iommu *iommu, - struct ivhd_header *h) -{ - u8 *p = (u8 *)h; - u8 *end = p, flags = 0; - u16 devid = 0, devid_start = 0, devid_to = 0; - u32 dev_i, ext_flags = 0; - bool alias = false; - struct ivhd_entry *e; - - /* - * First save the recommended feature enable bits from ACPI - */ - iommu->acpi_flags = h->flags; - - /* - * Done. 
Now parse the device entries - */ - p += sizeof(struct ivhd_header); - end += h->length; - - - while (p < end) { - e = (struct ivhd_entry *)p; - switch (e->type) { - case IVHD_DEV_ALL: - - DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" - " last device %02x:%02x.%x flags: %02x\n", - PCI_BUS(iommu->first_device), - PCI_SLOT(iommu->first_device), - PCI_FUNC(iommu->first_device), - PCI_BUS(iommu->last_device), - PCI_SLOT(iommu->last_device), - PCI_FUNC(iommu->last_device), - e->flags); - - for (dev_i = iommu->first_device; - dev_i <= iommu->last_device; ++dev_i) - set_dev_entry_from_acpi(iommu, dev_i, - e->flags, 0); - break; - case IVHD_DEV_SELECT: - - DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " - "flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, 0); - break; - case IVHD_DEV_SELECT_RANGE_START: - - DUMP_printk(" DEV_SELECT_RANGE_START\t " - "devid: %02x:%02x.%x flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid_start = e->devid; - flags = e->flags; - ext_flags = 0; - alias = false; - break; - case IVHD_DEV_ALIAS: - - DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " - "flags: %02x devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid = e->devid; - devid_to = e->ext >> 8; - set_dev_entry_from_acpi(iommu, devid , e->flags, 0); - set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); - amd_iommu_alias_table[devid] = devid_to; - break; - case IVHD_DEV_ALIAS_RANGE: - - DUMP_printk(" DEV_ALIAS_RANGE\t\t " - "devid: %02x:%02x.%x flags: %02x " - "devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid_start = e->devid; - flags = e->flags; - devid_to = e->ext >> 8; - ext_flags = 0; - alias = true; - break; - case IVHD_DEV_EXT_SELECT: - - DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " - "flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, - e->ext); - break; - case IVHD_DEV_EXT_SELECT_RANGE: - - DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " - "%02x:%02x.%x flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid_start = e->devid; - flags = e->flags; - ext_flags = e->ext; - alias = false; - break; - case IVHD_DEV_RANGE_END: - - DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid)); - - devid = e->devid; - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { - if (alias) { - amd_iommu_alias_table[dev_i] = devid_to; - set_dev_entry_from_acpi(iommu, - devid_to, flags, ext_flags); - } - set_dev_entry_from_acpi(iommu, dev_i, - flags, ext_flags); - } - break; - default: - break; - } - - p += ivhd_entry_length(p); - } -} - -/* Initializes the device->iommu mapping for the driver */ -static int __init init_iommu_devices(struct amd_iommu *iommu) -{ - u32 i; - - for (i = iommu->first_device; i <= iommu->last_device; ++i) - set_iommu_for_device(iommu, i); - - return 0; -} - -static void __init free_iommu_one(struct amd_iommu *iommu) -{ - free_command_buffer(iommu); - free_event_buffer(iommu); - 
iommu_unmap_mmio_space(iommu); -} - -static void __init free_iommu_all(void) -{ - struct amd_iommu *iommu, *next; - - for_each_iommu_safe(iommu, next) { - list_del(&iommu->list); - free_iommu_one(iommu); - kfree(iommu); - } -} - -/* - * This function glues the initialization of one IOMMU - * together and also allocates the command buffer and programs the - * hardware. It does NOT enable the IOMMU. This is done afterwards. - */ -static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) -{ - spin_lock_init(&iommu->lock); - - /* Add IOMMU to internal data structures */ - list_add_tail(&iommu->list, &amd_iommu_list); - iommu->index = amd_iommus_present++; - - if (unlikely(iommu->index >= MAX_IOMMUS)) { - WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); - return -ENOSYS; - } - - /* Index is fine - add IOMMU to the array */ - amd_iommus[iommu->index] = iommu; - - /* - * Copy data from ACPI table entry to the iommu struct - */ - iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff); - if (!iommu->dev) - return 1; - - iommu->cap_ptr = h->cap_ptr; - iommu->pci_seg = h->pci_seg; - iommu->mmio_phys = h->mmio_phys; - iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); - if (!iommu->mmio_base) - return -ENOMEM; - - iommu->cmd_buf = alloc_command_buffer(iommu); - if (!iommu->cmd_buf) - return -ENOMEM; - - iommu->evt_buf = alloc_event_buffer(iommu); - if (!iommu->evt_buf) - return -ENOMEM; - - iommu->int_enabled = false; - - init_iommu_from_pci(iommu); - init_iommu_from_acpi(iommu, h); - init_iommu_devices(iommu); - - if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) - amd_iommu_np_cache = true; - - return pci_enable_device(iommu->dev); -} - -/* - * Iterates over all IOMMU entries in the ACPI table, allocates the - * IOMMU structure and initializes it with init_iommu_one() - */ -static int __init init_iommu_all(struct acpi_table_header *table) -{ - u8 *p = (u8 *)table, *end = (u8 *)table; - struct ivhd_header *h; - struct amd_iommu *iommu; - int ret; - - end += table->length; - p += IVRS_HEADER_LENGTH; - - while (p < end) { - h = (struct ivhd_header *)p; - switch (*p) { - case ACPI_IVHD_TYPE: - - DUMP_printk("device: %02x:%02x.%01x cap: %04x " - "seg: %d flags: %01x info %04x\n", - PCI_BUS(h->devid), PCI_SLOT(h->devid), - PCI_FUNC(h->devid), h->cap_ptr, - h->pci_seg, h->flags, h->info); - DUMP_printk(" mmio-addr: %016llx\n", - h->mmio_phys); - - iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); - if (iommu == NULL) { - amd_iommu_init_err = -ENOMEM; - return 0; - } - - ret = init_iommu_one(iommu, h); - if (ret) { - amd_iommu_init_err = ret; - return 0; - } - break; - default: - break; - } - p += h->length; - - } - WARN_ON(p != end); - - return 0; -} - -/**************************************************************************** - * - * The following functions initialize the MSI interrupts for all IOMMUs - * in the system. It's a bit challenging because there could be multiple - * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per - * pci_dev. 
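
[Editor's sketch — not part of the patch] The early return on int_enabled below is what makes iommu_init_msi() safe to call repeatedly for the same IOMMU; coordinating several IOMMU structures behind one pci_dev is the harder part the comment above alludes to. A minimal model of that idempotent-init pattern, with illustrative names rather than the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct sketch_iommu {
	bool int_enabled;
};

static int sketch_init_msi(struct sketch_iommu *iommu)
{
	if (iommu->int_enabled)
		return 0;	/* already set up, don't enable again */

	puts("enabling MSI for this IOMMU");
	iommu->int_enabled = true;
	return 0;
}

int main(void)
{
	struct sketch_iommu iommu = { false };

	sketch_init_msi(&iommu);	/* enables */
	sketch_init_msi(&iommu);	/* no-op on the second call */
	return 0;
}
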
- * - ****************************************************************************/ - -static int iommu_setup_msi(struct amd_iommu *iommu) -{ - int r; - - if (pci_enable_msi(iommu->dev)) - return 1; - - r = request_threaded_irq(iommu->dev->irq, - amd_iommu_int_handler, - amd_iommu_int_thread, - 0, "AMD-Vi", - iommu->dev); - - if (r) { - pci_disable_msi(iommu->dev); - return 1; - } - - iommu->int_enabled = true; - iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); - - return 0; -} - -static int iommu_init_msi(struct amd_iommu *iommu) -{ - if (iommu->int_enabled) - return 0; - - if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) - return iommu_setup_msi(iommu); - - return 1; -} - -/**************************************************************************** - * - * The next functions belong to the third pass of parsing the ACPI - * table. In this last pass the memory mapping requirements are - * gathered (like exclusion and unity mapping ranges). - * - ****************************************************************************/ - -static void __init free_unity_maps(void) -{ - struct unity_map_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { - list_del(&entry->list); - kfree(entry); - } -} - -/* called when we find an exclusion range definition in ACPI */ -static int __init init_exclusion_range(struct ivmd_header *m) -{ - int i; - - switch (m->type) { - case ACPI_IVMD_TYPE: - set_device_exclusion_range(m->devid, m); - break; - case ACPI_IVMD_TYPE_ALL: - for (i = 0; i <= amd_iommu_last_bdf; ++i) - set_device_exclusion_range(i, m); - break; - case ACPI_IVMD_TYPE_RANGE: - for (i = m->devid; i <= m->aux; ++i) - set_device_exclusion_range(i, m); - break; - default: - break; - } - - return 0; -} - -/* called for unity map ACPI definition */ -static int __init init_unity_map_range(struct ivmd_header *m) -{ - struct unity_map_entry *e = 0; - char *s; - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) - return -ENOMEM; - - switch (m->type) { - default: - kfree(e); - return 0; - case ACPI_IVMD_TYPE: - s = "IVMD_TYPE\t\t\t"; - e->devid_start = e->devid_end = m->devid; - break; - case ACPI_IVMD_TYPE_ALL: - s = "IVMD_TYPE_ALL\t\t"; - e->devid_start = 0; - e->devid_end = amd_iommu_last_bdf; - break; - case ACPI_IVMD_TYPE_RANGE: - s = "IVMD_TYPE_RANGE\t\t"; - e->devid_start = m->devid; - e->devid_end = m->aux; - break; - } - e->address_start = PAGE_ALIGN(m->range_start); - e->address_end = e->address_start + PAGE_ALIGN(m->range_length); - e->prot = m->flags >> 1; - - DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" - " range_start: %016llx range_end: %016llx flags: %x\n", s, - PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start), - PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end), - PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), - e->address_start, e->address_end, m->flags); - - list_add_tail(&e->list, &amd_iommu_unity_map); - - return 0; -} - -/* iterates over all memory definitions we find in the ACPI table */ -static int __init init_memory_definitions(struct acpi_table_header *table) -{ - u8 *p = (u8 *)table, *end = (u8 *)table; - struct ivmd_header *m; - - end += table->length; - p += IVRS_HEADER_LENGTH; - - while (p < end) { - m = (struct ivmd_header *)p; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - else if (m->flags & IVMD_FLAG_UNITY_MAP) - init_unity_map_range(m); - - p += m->length; - } - - return 0; -} - -/* - * Init the device table to not allow DMA access for devices and - * suppress all page faults - */ 
-static void init_device_table(void) -{ - u32 devid; - - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - set_dev_entry_bit(devid, DEV_ENTRY_VALID); - set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); - } -} - -static void iommu_init_flags(struct amd_iommu *iommu) -{ - iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); - - iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : - iommu_feature_disable(iommu, CONTROL_ISOC_EN); - - /* - * make IOMMU memory accesses cache coherent - */ - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); -} - -static void iommu_apply_resume_quirks(struct amd_iommu *iommu) -{ - int i, j; - u32 ioc_feature_control; - struct pci_dev *pdev = NULL; - - /* RD890 BIOSes may not have completely reconfigured the iommu */ - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * First, we need to ensure that the iommu is enabled. This is - * controlled by a register in the northbridge - */ - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); - - if (!pdev) - return; - - /* Select Northbridge indirect register 0x75 and enable writing */ - pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); - pci_read_config_dword(pdev, 0x64, &ioc_feature_control); - - /* Enable the iommu */ - if (!(ioc_feature_control & 0x1)) - pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); - - pci_dev_put(pdev); - - /* Restore the iommu BAR */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo); - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, - iommu->stored_addr_hi); - - /* Restore the l1 indirect regs for each of the 6 l1s */ - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); - - /* Restore the l2 indirect regs */ - for (i = 0; i < 0x83; i++) - iommu_write_l2(iommu, i, iommu->stored_l2[i]); - - /* Lock PCI setup registers */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo | 1); -} - -/* - * This function finally enables all IOMMUs found in the system after - * they have been initialized - */ -static void enable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) { - iommu_disable(iommu); - iommu_init_flags(iommu); - iommu_set_device_table(iommu); - iommu_enable_command_buffer(iommu); - iommu_enable_event_buffer(iommu); - iommu_set_exclusion_range(iommu); - iommu_init_msi(iommu); - iommu_enable(iommu); - iommu_flush_all_caches(iommu); - } -} - -static void disable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_disable(iommu); -} - -/* - * Suspend/Resume support - * disable suspend until real resume implemented - */ - -static void amd_iommu_resume(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_apply_resume_quirks(iommu); - - /* re-load the hardware */ - enable_iommus(); - - /* - * we have to flush after the IOMMUs are enabled because a - * disabled IOMMU will never execute the commands we send - */ - for_each_iommu(iommu) - iommu_flush_all_caches(iommu); -} - -static int amd_iommu_suspend(void) -{ - /* disable IOMMUs to go 
out of the way for BIOS */ - disable_iommus(); - - return 0; -} - -static struct syscore_ops amd_iommu_syscore_ops = { - .suspend = amd_iommu_suspend, - .resume = amd_iommu_resume, -}; - -/* - * This is the core init function for AMD IOMMU hardware in the system. - * This function is called from the generic x86 DMA layer initialization - * code. - * - * This function basically parses the ACPI table for AMD IOMMU (IVRS) - * three times: - * - * 1st pass) Find the highest PCI device id the driver has to handle. - * Based on this information, the sizes of the data - * structures that need to be allocated are determined. - * - * 2nd pass) Initialize the data structures just allocated with the - * information in the ACPI table about available AMD IOMMUs - * in the system. It also maps the PCI devices in the - * system to specific IOMMUs. - * - * 3rd pass) After the basic data structures are allocated and - * initialized we update them with information about memory - * remapping requirements parsed out of the ACPI table in - * this last pass. - * - * After that the hardware is initialized and ready to go. In the last - * step we do some Linux specific things like registering the driver in - * the dma_ops interface and initializing the suspend/resume support - * functions. Finally it prints some information about AMD IOMMUs and - * the driver state and enables the hardware. - */ -static int __init amd_iommu_init(void) -{ - int i, ret = 0; - - /* - * First parse ACPI tables to find the largest Bus/Dev/Func - * we need to handle. Based on this information, the shared data - * structures for the IOMMUs in the system will be allocated - */ - if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) - return -ENODEV; - - ret = amd_iommu_init_err; - if (ret) - goto out; - - dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); - alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); - rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); - - ret = -ENOMEM; - - /* Device table - directly used by all IOMMUs */ - amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(dev_table_size)); - if (amd_iommu_dev_table == NULL) - goto out; - - /* - * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the - * IOMMU sees for that device - */ - amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, - get_order(alias_table_size)); - if (amd_iommu_alias_table == NULL) - goto free; - - /* IOMMU rlookup table - find the IOMMU for a specific device */ - amd_iommu_rlookup_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(rlookup_table_size)); - if (amd_iommu_rlookup_table == NULL) - goto free; - - amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(MAX_DOMAIN_ID/8)); - if (amd_iommu_pd_alloc_bitmap == NULL) - goto free; - - /* init the device table */ - init_device_table(); - - /* - * Let all alias entries point to themselves - */ - for (i = 0; i <= amd_iommu_last_bdf; ++i) - amd_iommu_alias_table[i] = i; - - /* - * Never allocate domain 0 because it's used as the non-allocated and - * error value placeholder - */ - amd_iommu_pd_alloc_bitmap[0] = 1; - - spin_lock_init(&amd_iommu_pd_lock); - - /* - * Now that the data structures are allocated and basically initialized, - * start the real ACPI table scan - */ - ret = -ENODEV; - if (acpi_table_parse("IVRS", init_iommu_all) != 0) - goto free; - - if (amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - if (acpi_table_parse("IVRS", init_memory_definitions) != 0) - goto free; - - if 
(amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - ret = amd_iommu_init_devices(); - if (ret) - goto free; - - enable_iommus(); - - if (iommu_pass_through) - ret = amd_iommu_init_passthrough(); - else - ret = amd_iommu_init_dma_ops(); - - if (ret) - goto free_disable; - - amd_iommu_init_api(); - - amd_iommu_init_notifier(); - - register_syscore_ops(&amd_iommu_syscore_ops); - - if (iommu_pass_through) - goto out; - - if (amd_iommu_unmap_flush) - printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); - else - printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); - - x86_platform.iommu_shutdown = disable_iommus; -out: - return ret; - -free_disable: - disable_iommus(); - -free: - amd_iommu_uninit_devices(); - - free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID/8)); - - free_pages((unsigned long)amd_iommu_rlookup_table, - get_order(rlookup_table_size)); - - free_pages((unsigned long)amd_iommu_alias_table, - get_order(alias_table_size)); - - free_pages((unsigned long)amd_iommu_dev_table, - get_order(dev_table_size)); - - free_iommu_all(); - - free_unity_maps(); - -#ifdef CONFIG_GART_IOMMU - /* - * We failed to initialize the AMD IOMMU - try fallback to GART - * if possible. - */ - gart_iommu_init(); - -#endif - - goto out; -} - -/**************************************************************************** - * - * Early detect code. This code runs at IOMMU detection time in the DMA - * layer. It just looks if there is an IVRS ACPI table to detect AMD - * IOMMUs - * - ****************************************************************************/ -static int __init early_amd_iommu_detect(struct acpi_table_header *table) -{ - return 0; -} - -int __init amd_iommu_detect(void) -{ - if (no_iommu || (iommu_detected && !gart_iommu_aperture)) - return -ENODEV; - - if (amd_iommu_disabled) - return -ENODEV; - - if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { - iommu_detected = 1; - amd_iommu_detected = 1; - x86_init.iommu.iommu_init = amd_iommu_init; - - /* Make sure ACS will be enabled */ - pci_request_acs(); - return 1; - } - return -ENODEV; -} - -/**************************************************************************** - * - * Parsing functions for the AMD IOMMU specific kernel command line - * options. 
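
[Editor's sketch — not part of the patch] A userspace model of the option scan that parse_amd_iommu_options() below performs: the loop advances one character at a time, so "fullflush" and "off" are recognized anywhere in the amd_iommu= string (e.g. "fullflush,off").

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool fullflush, off;

/* same loop shape as the kernel's parse_amd_iommu_options() */
static void sketch_parse(const char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			fullflush = true;
		if (strncmp(str, "off", 3) == 0)
			off = true;
	}
}

int main(void)
{
	sketch_parse("fullflush,off");
	printf("fullflush=%d off=%d\n", fullflush, off);
	return 0;
}
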
- * - ****************************************************************************/ - -static int __init parse_amd_iommu_dump(char *str) -{ - amd_iommu_dump = true; - - return 1; -} - -static int __init parse_amd_iommu_options(char *str) -{ - for (; *str; ++str) { - if (strncmp(str, "fullflush", 9) == 0) - amd_iommu_unmap_flush = true; - if (strncmp(str, "off", 3) == 0) - amd_iommu_disabled = true; - } - - return 1; -} - -__setup("amd_iommu_dump", parse_amd_iommu_dump); -__setup("amd_iommu=", parse_amd_iommu_options); - -IOMMU_INIT_FINISH(amd_iommu_detect, - gart_iommu_hole_init, - 0, - 0); diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index c29d631af6f..395a10e6806 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -63,7 +63,6 @@ void foo(void) BLANK(); OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending); - OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); BLANK(); OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3a0338b4b17..4ee3abf20ed 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -22,7 +22,6 @@ #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/slab.h> -#include <linux/highmem.h> #include <linux/cpu.h> #include <linux/bitops.h> @@ -45,38 +44,27 @@ do { \ #endif /* - * best effort, GUP based copy_from_user() that assumes IRQ or NMI context + * | NHM/WSM | SNB | + * register ------------------------------- + * | HT | no HT | HT | no HT | + *----------------------------------------- + * offcore | core | core | cpu | core | + * lbr_sel | core | core | cpu | core | + * ld_lat | cpu | core | cpu | core | + *----------------------------------------- + * + * Given that there is a small number of shared regs, + * we can pre-allocate their slot in the per-cpu + * per-core reg tables. */ -static unsigned long -copy_from_user_nmi(void *to, const void __user *from, unsigned long n) -{ - unsigned long offset, addr = (unsigned long)from; - unsigned long size, len = 0; - struct page *page; - void *map; - int ret; - - do { - ret = __get_user_pages_fast(addr, 1, 0, &page); - if (!ret) - break; - - offset = addr & (PAGE_SIZE - 1); - size = min(PAGE_SIZE - offset, n - len); - - map = kmap_atomic(page); - memcpy(to, map+offset, size); - kunmap_atomic(map); - put_page(page); +enum extra_reg_type { + EXTRA_REG_NONE = -1, /* not used */ - len += size; - to += size; - addr += size; + EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ + EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ - } while (len < n); - - return len; -} + EXTRA_REG_MAX /* number of entries needed */ +}; struct event_constraint { union { @@ -132,11 +120,10 @@ struct cpu_hw_events { struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; /* - * Intel percore register state. - * Coordinate shared resources between HT threads. + * manage shared (per-core, per-cpu) registers + * used on Intel NHM/WSM/SNB */ - int percore_used; /* Used by this CPU? */ - struct intel_percore *per_core; + struct intel_shared_regs *shared_regs; /* * AMD specific bits @@ -187,26 +174,45 @@ struct cpu_hw_events { for ((e) = (c); (e)->weight; (e)++) /* + * Per register state. 
+ */ +struct er_account { + raw_spinlock_t lock; /* per-core: protect structure */ + u64 config; /* extra MSR config */ + u64 reg; /* extra MSR number */ + atomic_t ref; /* reference count */ +}; + +/* * Extra registers for specific events. + * * Some events need large masks and require external MSRs. - * Define a mapping to these extra registers. + * Those extra MSRs end up being shared for all events on + * a PMU and sometimes between PMU of sibling HT threads. + * In either case, the kernel needs to handle conflicting + * accesses to those extra, shared, regs. The data structure + * to manage those registers is stored in cpu_hw_event. */ struct extra_reg { unsigned int event; unsigned int msr; u64 config_mask; u64 valid_mask; + int idx; /* per_xxx->regs[] reg index */ }; -#define EVENT_EXTRA_REG(e, ms, m, vm) { \ +#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ .event = (e), \ .msr = (ms), \ .config_mask = (m), \ .valid_mask = (vm), \ + .idx = EXTRA_REG_##i \ } -#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ - EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) -#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) + +#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ + EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) + +#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) union perf_capabilities { struct { @@ -252,7 +258,6 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; - struct event_constraint *percore_constraints; void (*quirks)(void); int perfctr_second_write; @@ -286,8 +291,12 @@ struct x86_pmu { * Extra registers for events */ struct extra_reg *extra_regs; + unsigned int er_flags; }; +#define ERF_NO_HT_SHARING 1 +#define ERF_HAS_RSP_1 2 + static struct x86_pmu x86_pmu __read_mostly; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { @@ -393,10 +402,10 @@ static inline unsigned int x86_pmu_event_addr(int index) */ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) { + struct hw_perf_event_extra *reg; struct extra_reg *er; - event->hw.extra_reg = 0; - event->hw.extra_config = 0; + reg = &event->hw.extra_reg; if (!x86_pmu.extra_regs) return 0; @@ -406,8 +415,10 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; - event->hw.extra_reg = er->msr; - event->hw.extra_config = event->attr.config1; + + reg->idx = er->idx; + reg->config = event->attr.config1; + reg->reg = er->msr; break; } return 0; @@ -706,6 +717,9 @@ static int __x86_pmu_event_init(struct perf_event *event) event->hw.last_cpu = -1; event->hw.last_tag = ~0ULL; + /* mark unused */ + event->hw.extra_reg.idx = EXTRA_REG_NONE; + return x86_pmu.hw_config(event); } @@ -747,8 +761,8 @@ static void x86_pmu_disable(struct pmu *pmu) static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, u64 enable_mask) { - if (hwc->extra_reg) - wrmsrl(hwc->extra_reg, hwc->extra_config); + if (hwc->extra_reg.reg) + wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); wrmsrl(hwc->config_base, hwc->config | enable_mask); } @@ -1332,7 +1346,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1637,6 +1651,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu) perf_pmu_enable(pmu); return 0; } +/* + * a fake_cpuc is used to 
validate event groups. Due to + * the extra reg logic, we need to also allocate a fake + * per_core and per_cpu structure. Otherwise, group events + * using extra reg may conflict without the kernel being + * able to catch this when the last event gets added to + * the group. + */ +static void free_fake_cpuc(struct cpu_hw_events *cpuc) +{ + kfree(cpuc->shared_regs); + kfree(cpuc); +} + +static struct cpu_hw_events *allocate_fake_cpuc(void) +{ + struct cpu_hw_events *cpuc; + int cpu = raw_smp_processor_id(); + + cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); + if (!cpuc) + return ERR_PTR(-ENOMEM); + + /* only needed if we have extra_regs */ + if (x86_pmu.extra_regs) { + cpuc->shared_regs = allocate_shared_regs(cpu); + if (!cpuc->shared_regs) + goto error; + } + return cpuc; +error: + free_fake_cpuc(cpuc); + return ERR_PTR(-ENOMEM); +} /* * validate that we can schedule this event @@ -1647,9 +1695,9 @@ static int validate_event(struct perf_event *event) struct event_constraint *c; int ret = 0; - fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); - if (!fake_cpuc) - return -ENOMEM; + fake_cpuc = allocate_fake_cpuc(); + if (IS_ERR(fake_cpuc)) + return PTR_ERR(fake_cpuc); c = x86_pmu.get_event_constraints(fake_cpuc, event); @@ -1659,7 +1707,7 @@ static int validate_event(struct perf_event *event) if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(fake_cpuc, event); - kfree(fake_cpuc); + free_fake_cpuc(fake_cpuc); return ret; } @@ -1679,36 +1727,32 @@ static int validate_group(struct perf_event *event) { struct perf_event *leader = event->group_leader; struct cpu_hw_events *fake_cpuc; - int ret, n; - - ret = -ENOMEM; - fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); - if (!fake_cpuc) - goto out; + int ret = -ENOSPC, n; fake_cpuc = allocate_fake_cpuc(); + if (IS_ERR(fake_cpuc)) + return PTR_ERR(fake_cpuc); /* * the event is not yet connected with its * siblings therefore we must first collect * existing siblings, then add the new event * before we can simulate the scheduling */ - ret = -ENOSPC; n = collect_events(fake_cpuc, leader, true); if (n < 0) - goto out_free; + goto out; fake_cpuc->n_events = n; n = collect_events(fake_cpuc, event, false); if (n < 0) - goto out_free; + goto out; fake_cpuc->n_events = n; ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); -out_free: - kfree(fake_cpuc); out: + free_fake_cpuc(fake_cpuc); return ret; } diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219..941caa2e449 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -89,6 +89,20 @@ static __initconst const u64 amd_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */ + [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; /* diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 41178c826c4..45fbb8f7f54 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1,25 +1,15 @@ #ifdef CONFIG_CPU_SUP_INTEL -#define MAX_EXTRA_REGS 2 - -/* - * Per register state.
- */ -struct er_account { - int ref; /* reference count */ - unsigned int extra_reg; /* extra MSR number */ - u64 extra_config; /* extra MSR config */ -}; - /* - * Per core state - * This used to coordinate shared registers for HT threads. + * Per core/cpu state + * + * Used to coordinate shared registers between HT threads or + * among events on a single PMU. */ -struct intel_percore { - raw_spinlock_t lock; /* protect structure */ - struct er_account regs[MAX_EXTRA_REGS]; - int refcnt; /* number of threads */ - unsigned core_id; +struct intel_shared_regs { + struct er_account regs[EXTRA_REG_MAX]; + int refcnt; /* per-core: #HT threads */ + unsigned core_id; /* per-core: core id */ }; /* @@ -88,16 +78,10 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = -{ - INTEL_EVENT_CONSTRAINT(0xb7, 0), - EVENT_CONSTRAINT_END -}; - static struct event_constraint intel_westmere_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ @@ -116,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ - INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ EVENT_CONSTRAINT_END @@ -125,15 +107,13 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = +static struct event_constraint intel_v1_event_constraints[] __read_mostly = { - INTEL_EVENT_CONSTRAINT(0xb7, 0), - INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; @@ -145,6 +125,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +static struct extra_reg intel_snb_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + EVENT_EXTRA_END +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -245,6 +231,21 @@ static __initconst const u64 snb_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + }; static __initconst const u64 westmere_hw_cache_event_ids @@ -346,6 +347,20 @@ static __initconst const u64 westmere_hw_cache_event_ids [ C(RESULT_MISS) ] = 
-1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + }, }; /* @@ -398,7 +413,21 @@ static __initconst const u64 nehalem_hw_cache_extra_regs [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, }, - } + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM, + }, + }, }; static __initconst const u64 nehalem_hw_cache_event_ids @@ -500,6 +529,20 @@ static __initconst const u64 nehalem_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + }, }; static __initconst const u64 core2_hw_cache_event_ids @@ -1003,7 +1046,7 @@ again: data.period = event->hw.last_period; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1037,65 +1080,121 @@ intel_bts_constraints(struct perf_event *event) return NULL; } +static bool intel_try_alt_er(struct perf_event *event, int orig_idx) +{ + if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) + return false; + + if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01bb; + event->hw.extra_reg.idx = EXTRA_REG_RSP_1; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; + } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01b7; + event->hw.extra_reg.idx = EXTRA_REG_RSP_0; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; + } + + if (event->hw.extra_reg.idx == orig_idx) + return false; + + return true; +} + +/* + * manage allocation of shared extra msr for certain events + * + * sharing can be: + * per-cpu: to be shared between the various events on a single PMU + * per-core: per-cpu + shared by HT threads + */ static struct event_constraint * -intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) +__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; - struct event_constraint *c; - struct intel_percore *pc; + struct event_constraint *c = &emptyconstraint; + struct hw_perf_event_extra *reg = &event->hw.extra_reg; struct er_account *era; - int i; - int free_slot; - int found; + unsigned long flags; + int orig_idx = reg->idx; - if (!x86_pmu.percore_constraints || hwc->extra_alloc) - return NULL; + /* already allocated shared msr */ + if (reg->alloc) + return &unconstrained; - for (c = x86_pmu.percore_constraints; c->cmask; c++) { - if (e != c->code) - continue; +again: + era = &cpuc->shared_regs->regs[reg->idx]; + /* + * we use spin_lock_irqsave() to avoid 
lockdep issues when + * passing a fake cpuc + */ + raw_spin_lock_irqsave(&era->lock, flags); + + if (!atomic_read(&era->ref) || era->config == reg->config) { + + /* lock in msr value */ + era->config = reg->config; + era->reg = reg->reg; + + /* one more user */ + atomic_inc(&era->ref); + + /* no need to reallocate during incremental event scheduling */ + reg->alloc = 1; /* - * Allocate resource per core. + * All events using extra_reg are unconstrained. + * Avoids calling x86_get_event_constraints() + * + * Must revisit if extra_reg controlling events + * ever have constraints. Worst case we go through + * the regular event constraint table. */ - pc = cpuc->per_core; - if (!pc) - break; - c = &emptyconstraint; - raw_spin_lock(&pc->lock); - free_slot = -1; - found = 0; - for (i = 0; i < MAX_EXTRA_REGS; i++) { - era = &pc->regs[i]; - if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { - /* Allow sharing same config */ - if (hwc->extra_config == era->extra_config) { - era->ref++; - cpuc->percore_used = 1; - hwc->extra_alloc = 1; - c = NULL; - } - /* else conflict */ - found = 1; - break; - } else if (era->ref == 0 && free_slot == -1) - free_slot = i; - } - if (!found && free_slot != -1) { - era = &pc->regs[free_slot]; - era->ref = 1; - era->extra_reg = hwc->extra_reg; - era->extra_config = hwc->extra_config; - cpuc->percore_used = 1; - hwc->extra_alloc = 1; - c = NULL; - } - raw_spin_unlock(&pc->lock); - return c; + c = &unconstrained; + } else if (intel_try_alt_er(event, orig_idx)) { + raw_spin_unlock_irqrestore(&era->lock, flags); + goto again; } + raw_spin_unlock_irqrestore(&era->lock, flags); - return NULL; + return c; +}
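For orientation, the get side above and the put side just below reduce to a refcounted-slot pattern: a slot can be claimed when it is unused or already programmed with the same config, and the last user releasing it makes it reprogrammable. A minimal standalone sketch of that shape, using userspace stand-ins for raw_spinlock_t/atomic_t (the names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Toy model of one shared extra-MSR slot (cf. struct er_account). */
struct slot {
	pthread_mutex_t lock;	/* stands in for raw_spinlock_t */
	uint64_t config;	/* value currently programmed */
	atomic_int ref;		/* number of events sharing it */
};

static struct slot offcore_rsp_0 = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Claim the slot for 'config': allowed if the slot is free or already
 * carries the very same value, so identical configs share one MSR. */
static bool slot_get(struct slot *s, uint64_t config)
{
	bool ok = false;

	pthread_mutex_lock(&s->lock);
	if (atomic_load(&s->ref) == 0 || s->config == config) {
		s->config = config;		/* lock in the value */
		atomic_fetch_add(&s->ref, 1);	/* one more user */
		ok = true;
	}
	pthread_mutex_unlock(&s->lock);
	return ok;	/* false: conflicting config, caller must fall back */
}

static void slot_put(struct slot *s)
{
	atomic_fetch_sub(&s->ref, 1);	/* last user makes it reusable */
}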
+ +static void +__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, + struct hw_perf_event_extra *reg) +{ + struct er_account *era; + + /* + * only put constraint if extra reg was actually + * allocated. Also takes care of events which do + * not use an extra shared reg. + */ + if (!reg->alloc) + return; + + era = &cpuc->shared_regs->regs[reg->idx]; + + /* one fewer user */ + atomic_dec(&era->ref); + + /* allocate again next time */ + reg->alloc = 0; +} + +static struct event_constraint * +intel_shared_regs_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct event_constraint *c = NULL; + + if (event->hw.extra_reg.idx != EXTRA_REG_NONE) + c = __intel_shared_reg_get_constraints(cpuc, event); + + return c; } static struct event_constraint * @@ -1111,49 +1210,28 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event if (c) return c; - c = intel_percore_constraints(cpuc, event); + c = intel_shared_regs_constraints(cpuc, event); if (c) return c; return x86_get_event_constraints(cpuc, event); } -static void intel_put_event_constraints(struct cpu_hw_events *cpuc, +static void +intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - struct extra_reg *er; - struct intel_percore *pc; - struct er_account *era; - struct hw_perf_event *hwc = &event->hw; - int i, allref; + struct hw_perf_event_extra *reg; - if (!cpuc->percore_used) - return; - - for (er = x86_pmu.extra_regs; er->msr; er++) { - if (er->event != (hwc->config & er->config_mask)) - continue; + reg = &event->hw.extra_reg; + if (reg->idx != EXTRA_REG_NONE) + __intel_shared_reg_put_constraints(cpuc, reg); +} - pc = cpuc->per_core; - raw_spin_lock(&pc->lock); - for (i = 0; i < MAX_EXTRA_REGS; i++) { - era = &pc->regs[i]; - if (era->ref > 0 && - era->extra_config == hwc->extra_config && - era->extra_reg == er->msr) { - era->ref--; - hwc->extra_alloc = 0; - break; - } - } - allref = 0; - for (i = 0; i < MAX_EXTRA_REGS; i++) - allref += pc->regs[i].ref; - if (allref == 0) - cpuc->percore_used = 0; - raw_spin_unlock(&pc->lock); - break; - } +static void intel_put_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + intel_put_shared_regs_event_constraints(cpuc, event); } static int intel_pmu_hw_config(struct perf_event *event) @@ -1231,20 +1309,36 @@ static __initconst const struct x86_pmu core_pmu = { .event_constraints = intel_core_event_constraints, }; +static struct intel_shared_regs *allocate_shared_regs(int cpu) +{ + struct intel_shared_regs *regs; + int i; + + regs = kzalloc_node(sizeof(struct intel_shared_regs), + GFP_KERNEL, cpu_to_node(cpu)); + if (regs) { + /* + * initialize the locks to keep lockdep happy + */ + for (i = 0; i < EXTRA_REG_MAX; i++) + raw_spin_lock_init(&regs->regs[i].lock); + + regs->core_id = -1; + } + return regs; +} + static int intel_pmu_cpu_prepare(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - if (!cpu_has_ht_siblings()) + if (!x86_pmu.extra_regs) return NOTIFY_OK; - cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), - GFP_KERNEL, cpu_to_node(cpu)); - if (!cpuc->per_core) + cpuc->shared_regs = allocate_shared_regs(cpu); + if (!cpuc->shared_regs) return NOTIFY_BAD; - raw_spin_lock_init(&cpuc->per_core->lock); - cpuc->per_core->core_id = -1; return NOTIFY_OK; } @@ -1260,32 +1354,34 @@ static void intel_pmu_cpu_starting(int cpu) */ intel_pmu_lbr_reset(); - if (!cpu_has_ht_siblings()) + if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) return; for_each_cpu(i, topology_thread_cpumask(cpu)) { - struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; + struct intel_shared_regs *pc; + pc = per_cpu(cpu_hw_events, i).shared_regs; if (pc && pc->core_id
== core_id) { - kfree(cpuc->per_core); - cpuc->per_core = pc; + kfree(cpuc->shared_regs); + cpuc->shared_regs = pc; break; } } - cpuc->per_core->core_id = core_id; - cpuc->per_core->refcnt++; + cpuc->shared_regs->core_id = core_id; + cpuc->shared_regs->refcnt++; } static void intel_pmu_cpu_dying(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - struct intel_percore *pc = cpuc->per_core; + struct intel_shared_regs *pc; + pc = cpuc->shared_regs; if (pc) { if (pc->core_id == -1 || --pc->refcnt == 0) kfree(pc); - cpuc->per_core = NULL; + cpuc->shared_regs = NULL; } fini_debug_store_on_cpu(cpu); @@ -1436,7 +1532,6 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_nehalem_event_constraints; x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; - x86_pmu.percore_constraints = intel_nehalem_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; @@ -1481,10 +1576,10 @@ static __init int intel_pmu_init(void) intel_pmu_lbr_init_nhm(); x86_pmu.event_constraints = intel_westmere_event_constraints; - x86_pmu.percore_constraints = intel_westmere_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; + x86_pmu.er_flags |= ERF_HAS_RSP_1; /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; @@ -1502,6 +1597,10 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; + x86_pmu.extra_regs = intel_snb_extra_regs; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.er_flags |= ERF_HAS_RSP_1; + x86_pmu.er_flags |= ERF_NO_HT_SHARING; /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; @@ -1512,11 +1611,19 @@ static __init int intel_pmu_init(void) break; default: - /* - * default constraints for v2 and up - */ - x86_pmu.event_constraints = intel_gen_event_constraints; - pr_cont("generic architected perfmon, "); + switch (x86_pmu.version) { + case 1: + x86_pmu.event_constraints = intel_v1_event_constraints; + pr_cont("generic architected perfmon v1, "); + break; + default: + /* + * default constraints for v2 and up + */ + x86_pmu.event_constraints = intel_gen_event_constraints; + pr_cont("generic architected perfmon, "); + break; + } } return 0; } @@ -1528,4 +1635,8 @@ static int intel_pmu_init(void) return 0; } +static struct intel_shared_regs *allocate_shared_regs(int cpu) +{ + return NULL; +} #endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index bab491b8ee2..1b1ef3addcf 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void) */ perf_prepare_sample(&header, &data, event, &regs); - if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) + if (perf_output_begin(&handle, event, header.size * (top - at))) return 1; for (; at < top; at++) { @@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, else regs.flags &= ~PERF_EFLAGS_EXACT; - if (perf_event_overflow(event, 1, &data, &regs)) + if (perf_event_overflow(event, &data, &regs)) x86_pmu_stop(event, 0); } diff --git a/arch/x86/kernel/cpu/perf_event_p4.c
b/arch/x86/kernel/cpu/perf_event_p4.c index ead584fb6a7..7809d2bcb20 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -554,13 +554,102 @@ static __initconst const u64 p4_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; +/* + * Because Netburst is quite restricted in how many + * identical events may run simultaneously, we introduce event aliases, + * i.e. different events which have the same functionality but + * use non-overlapping resources (ESCR/CCCR/counter registers). + * + * This allows us to relax the restrictions a bit and run two or more + * identical events together. + * + * Never set any custom internal bits such as P4_CONFIG_HT, + * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC; they are + * either kept up to date automatically or not applicable at all. + */ +struct p4_event_alias { + u64 original; + u64 alternative; +} p4_event_aliases[] = { + { + /* + * Non-halted cycles can be substituted with non-sleeping cycles (see + * Intel SDM Vol3b for details). We need this alias to be able + * to run nmi-watchdog and 'perf top' (or any other user space tool + * which is interested in running PERF_COUNT_HW_CPU_CYCLES) + * simultaneously. + */ + .original = + p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) | + P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)), + .alternative = + p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))| + p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT | + P4_CCCR_COMPARE), + }, +};
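The alias machinery amounts to a symmetric two-way table lookup plus a retry bound in the scheduler (the pass counter further down stops an original and its alternative from ping-ponging forever). A compilable sketch of that shape with invented event codes — the real table packs ESCR/CCCR bits instead:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct alias { uint64_t original, alternative; };

static const struct alias aliases[] = {
	{ .original = 0x01, .alternative = 0x02 },	/* made-up codes */
};

/* Return the counterpart config, or 0 if 'config' has no alias. */
static uint64_t get_alias(uint64_t config)
{
	for (size_t i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++) {
		if (config == aliases[i].original)
			return aliases[i].alternative;
		if (config == aliases[i].alternative)
			return aliases[i].original;
	}
	return 0;
}

/* Scheduler shape: try a config, fall back to its alias, but bound the
 * passes so two already-taken aliases cannot loop forever. */
static bool schedule_one(uint64_t *config, bool (*counter_free)(uint64_t))
{
	for (int pass = 0; pass <= 2; pass++) {
		if (counter_free(*config))
			return true;
		uint64_t alt = get_alias(*config);
		if (!alt)
			return false;
		*config = alt;
	}
	return false;
}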
+ +static u64 p4_get_alias_event(u64 config) +{ + u64 config_match; + int i; + + /* + * Only events with a special mark are allowed; + * this makes sure the value didn't come in as a + * malformed RAW event. + */ + if (!(config & P4_CONFIG_ALIASABLE)) + return 0; + + config_match = config & P4_CONFIG_EVENT_ALIAS_MASK; + + for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) { + if (config_match == p4_event_aliases[i].original) { + config_match = p4_event_aliases[i].alternative; + break; + } else if (config_match == p4_event_aliases[i].alternative) { + config_match = p4_event_aliases[i].original; + break; + } + } + + if (i >= ARRAY_SIZE(p4_event_aliases)) + return 0; + + return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS); +} + static u64 p4_general_events[PERF_COUNT_HW_MAX] = { /* non-halted CPU clocks */ [PERF_COUNT_HW_CPU_CYCLES] = p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) | - P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)), + P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) | + P4_CONFIG_ALIASABLE, /* * retired instructions @@ -945,7 +1034,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1120,6 +1209,8 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign struct p4_event_bind *bind; unsigned int i, thread, num; int cntr_idx, escr_idx; + u64 config_alias; + int pass; bitmap_zero(used_mask, X86_PMC_IDX_MAX); bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE); @@ -1128,6 +1219,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign hwc = &cpuc->event_list[i]->hw; thread = p4_ht_thread(cpu); + pass = 0; + +again: + /* + * It's possible to ping-pong forever between an + * original event and its alternative if both are + * scheduled already, so bound the number of passes. + */ + if (pass > 2) + goto done; + bind = p4_config_get_bind(hwc->config); escr_idx = p4_get_escr_idx(bind->escr_msr[thread]); if (unlikely(escr_idx == -1)) @@ -1141,8 +1243,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign } cntr_idx = p4_next_cntr(thread, used_mask, bind); - if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) - goto done; + if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) { + /* + * Check whether an event alias is still available.
+ */ + config_alias = p4_get_alias_event(hwc->config); + if (!config_alias) + goto done; + hwc->config = config_alias; + pass++; + goto again; + } p4_pmu_swap_config_ts(hwc, cpu); if (assign) diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 9aeb78a23de..a621f342768 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -134,6 +134,24 @@ static int __init add_bus_probe(void) module_init(add_bus_probe); #ifdef CONFIG_PCI +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + const void *prop; + unsigned int bus_min; + + prop = of_get_property(np, "bus-range", NULL); + if (!prop) + continue; + bus_min = be32_to_cpup(prop); + if (bus->number == bus_min) + return np; + } + return NULL; +} + static int x86_of_pci_irq_enable(struct pci_dev *dev) { struct of_irq oirq; @@ -165,50 +183,8 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev) void __cpuinit x86_of_pci_init(void) { - struct device_node *np; - pcibios_enable_irq = x86_of_pci_irq_enable; pcibios_disable_irq = x86_of_pci_irq_disable; - - for_each_node_by_type(np, "pci") { - const void *prop; - struct pci_bus *bus; - unsigned int bus_min; - struct device_node *child; - - prop = of_get_property(np, "bus-range", NULL); - if (!prop) - continue; - bus_min = be32_to_cpup(prop); - - bus = pci_find_bus(0, bus_min); - if (!bus) { - printk(KERN_ERR "Can't find a node for bus %s.\n", - np->full_name); - continue; - } - - if (bus->self) - bus->self->dev.of_node = np; - else - bus->dev.of_node = np; - - for_each_child_of_node(np, child) { - struct pci_dev *dev; - u32 devfn; - - prop = of_get_property(child, "reg", NULL); - if (!prop) - continue; - - devfn = (be32_to_cpup(prop) >> 8) & 0xff; - dev = pci_get_slot(bus, devfn); - if (!dev) - continue; - dev->dev.of_node = child; - pci_dev_put(dev); - } - } } #endif diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index e71c98d3c0d..19853ad8afc 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack, } /* - * We are returning from the irq stack and go to the previous one. - * If the previous stack is also in the irq stack, then bp in the first - * frame of the irq stack points to the previous, interrupted one. - * Otherwise we have another level of indirection: We first save - * the bp of the previous stack, then we switch the stack to the irq one - * and save a new bp that links to the previous one. 
- * (See save_args()) - */ -static inline unsigned long -fixup_bp_irq_link(unsigned long bp, unsigned long *stack, - unsigned long *irq_stack, unsigned long *irq_stack_end) -{ -#ifdef CONFIG_FRAME_POINTER - struct stack_frame *frame = (struct stack_frame *)bp; - unsigned long next; - - if (!in_irq_stack(stack, irq_stack, irq_stack_end)) { - if (!probe_kernel_address(&frame->next_frame, next)) - return next; - else - WARN_ONCE(1, "Perf: bad frame pointer = %p in " - "callchain\n", &frame->next_frame); - } -#endif - return bp; -} - -/* * x86-64 can have up to three kernel stacks: * process stack * interrupt stack @@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, task = current; if (!stack) { - stack = &dummy; - if (task && task != current) + if (regs) + stack = (unsigned long *)regs->sp; + else if (task && task != current) stack = (unsigned long *)task->thread.sp; + else + stack = &dummy; } if (!bp) @@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, * pointer (index -1 to end) in the IRQ stack: */ stack = (unsigned long *) (irq_stack_end[-1]); - bp = fixup_bp_irq_link(bp, stack, irq_stack, - irq_stack_end); irq_stack_end = NULL; ops->stack(data, "EOI"); continue; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8a445a0c989..d656f68371a 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -297,27 +297,26 @@ ENDPROC(native_usergs_sysret64) .endm /* save partial stack frame */ - .pushsection .kprobes.text, "ax" -ENTRY(save_args) - XCPT_FRAME + .macro SAVE_ARGS_IRQ cld - /* - * start from rbp in pt_regs and jump over - * return address. - */ - movq_cfi rdi, RDI+8-RBP - movq_cfi rsi, RSI+8-RBP - movq_cfi rdx, RDX+8-RBP - movq_cfi rcx, RCX+8-RBP - movq_cfi rax, RAX+8-RBP - movq_cfi r8, R8+8-RBP - movq_cfi r9, R9+8-RBP - movq_cfi r10, R10+8-RBP - movq_cfi r11, R11+8-RBP - - leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ - movq_cfi rbp, 8 /* push %rbp */ - leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + /* start from rbp in pt_regs and jump over */ + movq_cfi rdi, RDI-RBP + movq_cfi rsi, RSI-RBP + movq_cfi rdx, RDX-RBP + movq_cfi rcx, RCX-RBP + movq_cfi rax, RAX-RBP + movq_cfi r8, R8-RBP + movq_cfi r9, R9-RBP + movq_cfi r10, R10-RBP + movq_cfi r11, R11-RBP + + /* Save rbp so that we can unwind from get_irq_regs() */ + movq_cfi rbp, 0 + + /* Save previous stack value */ + movq %rsp, %rsi + + leaq -RBP(%rsp),%rdi /* arg1 for handler */ testl $3, CS(%rdi) je 1f SWAPGS @@ -329,19 +328,14 @@ ENTRY(save_args) */ 1: incl PER_CPU_VAR(irq_count) jne 2f - popq_cfi %rax /* move return address... */ mov PER_CPU_VAR(irq_stack_ptr),%rsp EMPTY_FRAME 0 - pushq_cfi %rbp /* backlink for unwinder */ - pushq_cfi %rax /* ... 
to the new stack */ - /* - * We entered an interrupt context - irqs are off: - */ -2: TRACE_IRQS_OFF - ret - CFI_ENDPROC -END(save_args) - .popsection + +2: /* Store previous stack value */ + pushq %rsi + /* We entered an interrupt context - irqs are off: */ + TRACE_IRQS_OFF + .endm ENTRY(save_rest) PARTIAL_FRAME 1 REST_SKIP+8 @@ -791,7 +785,7 @@ END(interrupt) /* reserve pt_regs for scratch regs and rbp */ subq $ORIG_RAX-RBP, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP - call save_args + SAVE_ARGS_IRQ PARTIAL_FRAME 0 call \func .endm @@ -814,15 +808,14 @@ ret_from_intr: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF decl PER_CPU_VAR(irq_count) - leaveq - CFI_RESTORE rbp + /* Restore saved previous stack */ + popq %rsi + leaq 16(%rsi), %rsp + CFI_DEF_CFA_REGISTER rsp - CFI_ADJUST_CFA_OFFSET -8 + CFI_ADJUST_CFA_OFFSET -16 - /* we did not save rbx, restore only from ARGOFFSET */ - addq $8, %rsp - CFI_ADJUST_CFA_OFFSET -8 exit_intr: GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5f9ecff328b..00354d4919a 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -608,7 +608,7 @@ int kgdb_arch_init(void) return register_die_notifier(&kgdb_notifier); } -static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, +static void kgdb_hw_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct task_struct *tsk = current; @@ -638,7 +638,7 @@ void kgdb_arch_late(void) for (i = 0; i < HBP_NUM; i++) { if (breakinfo[i].pev) continue; - breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); + breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 807c2a2b80f..82528799c5d 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target, return ret; } -static void ptrace_triggered(struct perf_event *bp, int nmi, +static void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { @@ -715,7 +715,8 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; - bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); /* * CHECKME: the previous code returned -EIO if the addr wasn't diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 14eed214b58..9242436e993 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -427,6 +427,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), }, }, + { /* Handle problems with rebooting on the Latitude E5420. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E5420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), + }, + }, + { /* Handle problems with rebooting on the Latitude E6420. 
*/ + .callback = set_pci_reboot, + .ident = "Dell Latitude E6420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), + }, + }, { } }; diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 55d9bc03f69..fdd0c6430e5 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace); -void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) { dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); if (trace->nr_entries < trace->max_entries) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 30ac65df7d4..e07a2fc876b 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -36,6 +36,7 @@ #include <asm/bootparam.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> +#include <asm/swiotlb.h> #include <asm/fixmap.h> #include <asm/proto.h> #include <asm/setup.h> diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index db832fd65ec..13ee258442a 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -71,7 +71,8 @@ #include <asm/stackprotector.h> #include <asm/reboot.h> /* for struct machine_ops */ -/*G:010 Welcome to the Guest! +/*G:010 + * Welcome to the Guest! * * The Guest in our tale is a simple creature: identical to the Host but * behaving in simplified but equivalent ways. In particular, the Guest is the @@ -190,15 +191,23 @@ static void lazy_hcall4(unsigned long call, #endif /*G:036 - * When lazy mode is turned off reset the per-cpu lazy mode variable and then - * issue the do-nothing hypercall to flush any stored calls. -:*/ + * When lazy mode is turned off, we issue the do-nothing hypercall to + * flush any stored calls, and call the generic helper to reset the + * per-cpu lazy mode variable. + */ static void lguest_leave_lazy_mmu_mode(void) { hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); paravirt_leave_lazy_mmu(); } +/* + * We also catch the end of context switch; we enter lazy mode for much of + * that too, so again we need to flush here. + * + * (Technically, this is lazy CPU mode, and normally we're in lazy MMU + * mode, but unlike Xen, lguest doesn't care about the difference). + */ static void lguest_end_context_switch(struct task_struct *next) { hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); @@ -391,7 +400,7 @@ static void lguest_load_tr_desc(void) * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. * * This instruction even it has its own Wikipedia entry. The Wikipedia entry - * has been translated into 5 languages. I am not making this up! + * has been translated into 6 languages. I am not making this up! * * We could get funky here and identify ourselves as "GenuineLguest", but * instead we just use the real "cpuid" instruction. Then I pretty much turned @@ -458,7 +467,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, /* * PAE systems can mark pages as non-executable. Linux calls this the * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced - * Virus Protection). We just switch turn if off here, since we don't + * Virus Protection). We just switch it off here, since we don't * support it. */ case 0x80000001: @@ -520,17 +529,16 @@ static unsigned long lguest_read_cr2(void) /* See lguest_set_pte() below. 
*/ static bool cr3_changed = false; +static unsigned long current_cr3; /* * cr3 is the current toplevel pagetable page: the principle is the same as - * cr0. Keep a local copy, and tell the Host when it changes. The only - * difference is that our local copy is in lguest_data because the Host needs - * to set it upon our initial hypercall. + * cr0. Keep a local copy, and tell the Host when it changes. */ static void lguest_write_cr3(unsigned long cr3) { - lguest_data.pgdir = cr3; lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); + current_cr3 = cr3; /* These two page tables are simple, linear, and used during boot */ if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table)) @@ -539,7 +547,7 @@ static void lguest_write_cr3(unsigned long cr3) static unsigned long lguest_read_cr3(void) { - return lguest_data.pgdir; + return current_cr3; } /* cr4 is used to enable and disable PGE, but we don't care. */ @@ -641,7 +649,7 @@ static void lguest_write_cr4(unsigned long val) /* * The Guest calls this after it has set a second-level entry (pte), ie. to map - * a page into a process' address space. Wetell the Host the toplevel and + * a page into a process' address space. We tell the Host the toplevel and * address this corresponds to. The Guest uses one pagetable per process, so * we need to tell the Host which one we're changing (mm->pgd). */ @@ -758,7 +766,7 @@ static void lguest_pmd_clear(pmd_t *pmdp) static void lguest_flush_tlb_single(unsigned long addr) { /* Simply set it to zero: if it was not, it will fault back in. */ - lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); + lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0); } /* @@ -1140,7 +1148,7 @@ static struct notifier_block paniced = { static __init char *lguest_memory_setup(void) { /* - *The Linux bootloader header contains an "e820" memory map: the + * The Linux bootloader header contains an "e820" memory map: the * Launcher populated the first entry with our memory limit. */ e820_add_region(boot_params.e820_map[0].addr, diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 4f420c2f2d5..6ddfe4fc23c 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -6,18 +6,22 @@ #include <asm/processor-flags.h> /*G:020 - * Our story starts with the kernel booting into startup_32 in - * arch/x86/kernel/head_32.S. It expects a boot header, which is created by - * the bootloader (the Launcher in our case). + + * Our story starts with the bzImage: booting starts at startup_32 in + * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real + * kernel in place and then jumps into it: startup_32 in + * arch/x86/kernel/head_32.S. Both routines expect a boot header in the %esi + * register, which is created by the bootloader (the Launcher in our case). * * The startup_32 function does very little: it clears the uninitialized global * C variables which we expect to be zero (ie. BSS) and then copies the boot - * header and kernel command line somewhere safe. Finally it checks the - * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen: - * if it's set to '1' (lguest's assigned number), then it calls us here. + * header and kernel command line somewhere safe, and populates some initial + * page tables. Finally it checks the 'hardware_subarch' field. This was + * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's + * assigned number), then it calls us here. * * WARNING: be very careful here!
We're running at addresses equal to physical - * addesses (around 0), not above PAGE_OFFSET as most code expectes + * addresses (around 0), not above PAGE_OFFSET as most code expects * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any * data without remembering to subtract __PAGE_OFFSET! * @@ -27,13 +31,18 @@ .section .init.text, "ax", @progbits ENTRY(lguest_entry) /* - * We make the "initialization" hypercall now to tell the Host about - * us, and also find out where it put our page tables. + * We make the "initialization" hypercall now to tell the Host where + * our lguest_data struct is. */ movl $LHCALL_LGUEST_INIT, %eax movl $lguest_data - __PAGE_OFFSET, %ebx int $LGUEST_TRAP_ENTRY + /* Now turn our pagetables on; set up by arch/x86/kernel/head_32.S. */ + movl $LHCALL_NEW_PGTABLE, %eax + movl $(initial_page_table - __PAGE_OFFSET), %ebx + int $LGUEST_TRAP_ENTRY + /* Set up the initial stack so we can run C code. */ movl $(init_thread_union+THREAD_SIZE),%esp @@ -96,12 +105,8 @@ send_interrupts: */ pushl %eax movl $LHCALL_SEND_INTERRUPTS, %eax - /* - * This is a vmcall instruction (same thing that KVM uses). Older - * assembler versions might not know the "vmcall" instruction, so we - * create one manually here. - */ - .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ + /* This is the actual hypercall trap. */ + int $LGUEST_TRAP_ENTRY /* Put eax back the way we found it. */ popl %eax ret diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index f2479f19ddd..6ba477342b8 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o lib-y := delay.o lib-y += thunk_$(BITS).o -lib-y += usercopy_$(BITS).o getuser.o putuser.o +lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
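The new arch/x86/lib/usercopy.c below adds copy_from_user_nmi(), a best-effort, GUP-based copy that cannot fault and is therefore usable from NMI context; the oprofile backtrace code later in this diff is converted to it. A hedged sketch of the calling pattern it enables — walk_user_stack() and the frame layout are invented for illustration, only copy_from_user_nmi() itself comes from the patch:

/* Sketch: walk a user-space frame-pointer chain from NMI context.
 * Assumes the classic layout: saved fp first, return address second. */
struct frame {
	struct frame *next;		/* saved frame pointer */
	unsigned long return_address;
};

static int walk_user_stack(struct frame __user *fp,
			   unsigned long *ips, int max)
{
	struct frame buf;
	int n = 0;

	while (fp && n < max) {
		/* Returns the number of bytes copied; a short copy
		 * means the page was not present or not mapped. */
		if (copy_from_user_nmi(&buf, fp, sizeof(buf)) != sizeof(buf))
			break;
		ips[n++] = buf.return_address;
		fp = (struct frame __user *)buf.next;
	}
	return n;
}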
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c new file mode 100644 index 00000000000..97be9cb5448 --- /dev/null +++ b/arch/x86/lib/usercopy.c @@ -0,0 +1,43 @@ +/* + * User address space access functions. + * + * For licencing details see kernel-base/COPYING + */ + +#include <linux/highmem.h> +#include <linux/module.h> + +/* + * best effort, GUP based copy_from_user() that is NMI-safe + */ +unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n) +{ + unsigned long offset, addr = (unsigned long)from; + unsigned long size, len = 0; + struct page *page; + void *map; + int ret; + + do { + ret = __get_user_pages_fast(addr, 1, 0, &page); + if (!ret) + break; + + offset = addr & (PAGE_SIZE - 1); + size = min(PAGE_SIZE - offset, n - len); + + map = kmap_atomic(page); + memcpy(to, map+offset, size); + kunmap_atomic(map); + put_page(page); + + len += size; + to += size; + addr += size; + + } while (len < n); + + return len; +} +EXPORT_SYMBOL_GPL(copy_from_user_nmi); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2dbf6bf4c7e..4d09df054e3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1161,11 +1161,11 @@ good_area: if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 704a37ceddd..dab41876cdd 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state, e->trace.entries = e->trace_entries; e->trace.max_entries = ARRAY_SIZE(e->trace_entries); e->trace.skip = 0; - save_stack_trace_regs(&e->trace, regs); + save_stack_trace_regs(regs, &e->trace); /* Round address down to nearest 16 bytes */ shadow_copy = kmemcheck_shadow_lookup(address diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index a5b64ab4cd6..bff89dfe361 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -11,10 +11,11 @@ #include <linux/oprofile.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/compat.h> +#include <linux/uaccess.h> + #include <asm/ptrace.h> -#include <asm/uaccess.h> #include <asm/stacktrace.h> -#include <linux/compat.h> static int backtrace_stack(void *data, char *name) { @@ -40,13 +41,13 @@ static struct stacktrace_ops backtrace_ops = { static struct stack_frame_ia32 * dump_user_backtrace_32(struct stack_frame_ia32 *head) { + /* Also check accessibility of one struct frame_head beyond: */ struct stack_frame_ia32 bufhead[2]; struct stack_frame_ia32 *fp; + unsigned long bytes; - /* Also check accessibility of one struct frame_head beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) - return NULL; - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) return NULL; fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); @@ -87,12 +88,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) static struct stack_frame
*dump_user_backtrace(struct stack_frame *head) { + /* Also check accessibility of one struct frame_head beyond: */ struct stack_frame bufhead[2]; + unsigned long bytes; - /* Also check accessibility of one struct stack_frame beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) - return NULL; - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) return NULL; oprofile_add_trace(bufhead[0].return_address); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index f567965c062..1017c7bee38 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -1,8 +1,13 @@ /* - * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux - * x86 PCI core to support the Xen PCI Frontend + * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and + * initial domain support. We also handle the DSDT _PRT callbacks for GSI's + * used in HVM and initial domain mode (PV does not parse ACPI, so it has no + * concept of GSIs). Under PV we hook under the pnbbios API for IRQs and + * 0xcf8 PCI configuration read/write. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> + * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> + * Stefano Stabellini <stefano.stabellini@eu.citrix.com> */ #include <linux/module.h> #include <linux/init.h> @@ -19,22 +24,53 @@ #include <xen/events.h> #include <asm/xen/pci.h> +static int xen_pcifront_enable_irq(struct pci_dev *dev) +{ + int rc; + int share = 1; + int pirq; + u8 gsi; + + rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", + rc); + return rc; + } + /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ + pirq = gsi; + + if (gsi < NR_IRQS_LEGACY) + share = 0; + + rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", + gsi, pirq, rc); + return rc; + } + + dev->irq = rc; + dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); + return 0; +} + #ifdef CONFIG_ACPI -static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, - int trigger, int polarity) +static int xen_register_pirq(u32 gsi, int gsi_override, int triggering, + bool set_pirq) { - int rc, irq; + int rc, pirq = -1, irq = -1; struct physdev_map_pirq map_irq; int shareable = 0; char *name; - if (!xen_hvm_domain()) - return -1; + if (set_pirq) + pirq = gsi; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; - map_irq.pirq = -1; + map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { @@ -42,7 +78,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, return -1; } - if (trigger == ACPI_EDGE_SENSITIVE) { + if (triggering == ACPI_EDGE_SENSITIVE) { shareable = 0; name = "ioapic-edge"; } else { @@ -50,12 +86,63 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, name = "ioapic-level"; } + if (gsi_override >= 0) + gsi = gsi_override; + irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); + if (irq < 0) + goto out; + + printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi); +out: + return irq; +} + +static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + if (!xen_hvm_domain()) + return -1; - printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); + return 
xen_register_pirq(gsi, -1 /* no GSI override */, trigger, + false /* no mapping of GSI to PIRQ */); +} + +#ifdef CONFIG_XEN_DOM0 +static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity) +{ + int rc, irq; + struct physdev_setup_gsi setup_gsi; + + if (!xen_pv_domain()) + return -1; + + printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", + gsi, triggering, polarity); + + irq = xen_register_pirq(gsi, gsi_override, triggering, true); + + setup_gsi.gsi = gsi; + setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); + setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); + if (rc == -EEXIST) + printk(KERN_INFO "Already setup the GSI :%d\n", gsi); + else if (rc) { + printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n", + gsi, rc); + } return irq; } + +static int acpi_register_gsi_xen(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity); +} +#endif #endif #if defined(CONFIG_PCI_MSI) @@ -65,6 +152,43 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, struct xen_pci_frontend_ops *xen_pci_frontend; EXPORT_SYMBOL_GPL(xen_pci_frontend); +static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int irq, ret, i; + struct msi_desc *msidesc; + int *v; + + v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); + if (!v) + return -ENOMEM; + + if (type == PCI_CAP_ID_MSIX) + ret = xen_pci_frontend_enable_msix(dev, v, nvec); + else + ret = xen_pci_frontend_enable_msi(dev, v); + if (ret) + goto error; + i = 0; + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, + (type == PCI_CAP_ID_MSIX) ? + "pcifront-msi-x" : + "pcifront-msi", + DOMID_SELF); + if (irq < 0) + goto free; + i++; + } + kfree(v); + return 0; + +error: + dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); +free: + kfree(v); + return ret; +} + #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0)) @@ -123,67 +247,6 @@ error: return -ENODEV; } -/* - * For MSI interrupts we have to use drivers/xen/event.s functions to - * allocate an irq_desc and setup the right */ - - -static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - int irq, ret, i; - struct msi_desc *msidesc; - int *v; - - v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); - if (!v) - return -ENOMEM; - - if (type == PCI_CAP_ID_MSIX) - ret = xen_pci_frontend_enable_msix(dev, v, nvec); - else - ret = xen_pci_frontend_enable_msi(dev, v); - if (ret) - goto error; - i = 0; - list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, - (type == PCI_CAP_ID_MSIX) ? - "pcifront-msi-x" : - "pcifront-msi", - DOMID_SELF); - if (irq < 0) - goto free; - i++; - } - kfree(v); - return 0; - -error: - dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); -free: - kfree(v); - return ret; -} - -static void xen_teardown_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *msidesc; - - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); - if (msidesc->msi_attrib.is_msix) - xen_pci_frontend_disable_msix(dev); - else - xen_pci_frontend_disable_msi(dev); - - /* Free the IRQ's and the msidesc using the generic code. 
	 */
-	default_teardown_msi_irqs(dev);
-}
-
-static void xen_teardown_msi_irq(unsigned int irq)
-{
-	xen_destroy_irq(irq);
-}
-
 #ifdef CONFIG_XEN_DOM0
 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
@@ -242,45 +305,28 @@ out:
 	return ret;
 }
 #endif
-#endif
 
-static int xen_pcifront_enable_irq(struct pci_dev *dev)
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
 {
-	int rc;
-	int share = 1;
-	int pirq;
-	u8 gsi;
-
-	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
-	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
-			 rc);
-		return rc;
-	}
-
-	rc = xen_allocate_pirq_gsi(gsi);
-	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
-			 gsi, rc);
-		return rc;
-	}
-	pirq = rc;
+	struct msi_desc *msidesc;
 
-	if (gsi < NR_IRQS_LEGACY)
-		share = 0;
+	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+	if (msidesc->msi_attrib.is_msix)
+		xen_pci_frontend_disable_msix(dev);
+	else
+		xen_pci_frontend_disable_msi(dev);
 
-	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
-	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
-			 gsi, pirq, rc);
-		return rc;
-	}
+	/* Free the IRQ's and the msidesc using the generic code. */
+	default_teardown_msi_irqs(dev);
+}
 
-	dev->irq = rc;
-	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
-	return 0;
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+	xen_destroy_irq(irq);
 }
+#endif
+
 int __init pci_xen_init(void)
 {
 	if (!xen_pv_domain() || xen_initial_domain())
@@ -327,79 +373,6 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
-{
-	int rc, pirq, irq = -1;
-	struct physdev_map_pirq map_irq;
-	int shareable = 0;
-	char *name;
-
-	if (!xen_pv_domain())
-		return -1;
-
-	if (triggering == ACPI_EDGE_SENSITIVE) {
-		shareable = 0;
-		name = "ioapic-edge";
-	} else {
-		shareable = 1;
-		name = "ioapic-level";
-	}
-	pirq = xen_allocate_pirq_gsi(gsi);
-	if (pirq < 0)
-		goto out;
-
-	if (gsi_override >= 0)
-		irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
-	else
-		irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
-	if (irq < 0)
-		goto out;
-
-	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
-
-	map_irq.domid = DOMID_SELF;
-	map_irq.type = MAP_PIRQ_TYPE_GSI;
-	map_irq.index = gsi;
-	map_irq.pirq = pirq;
-
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
-	if (rc) {
-		printk(KERN_WARNING "xen map irq failed %d\n", rc);
-		return -1;
-	}
-
-out:
-	return irq;
-}
-
-static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
-{
-	int rc, irq;
-	struct physdev_setup_gsi setup_gsi;
-
-	if (!xen_pv_domain())
-		return -1;
-
-	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
-			gsi, triggering, polarity);
-
-	irq = xen_register_pirq(gsi, gsi_override, triggering);
-
-	setup_gsi.gsi = gsi;
-	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
-	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
-	if (rc == -EEXIST)
-		printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
-	else if (rc) {
-		printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
-				gsi, rc);
-	}
-
-	return irq;
-}
-
 static __init void xen_setup_acpi_sci(void)
 {
 	int rc;
@@ -419,7 +392,7 @@ static __init void xen_setup_acpi_sci(void)
 	}
 	trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 	polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-	
+
 	printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
 			"polarity=%d\n", gsi, trigger, polarity);
 
@@ -434,10 +407,9 @@ static __init void xen_setup_acpi_sci(void)
 	 * the ACPI interpreter and keels over since IRQ 9 has not been
 	 * setup as we had setup IRQ 20 for it).
 	 */
-	/* Check whether the GSI != IRQ */
 	if (acpi_gsi_to_irq(gsi, &irq) == 0) {
-		if (irq >= 0 && irq != gsi)
-			/* Bugger, we MUST have that IRQ. */
+		/* Use the provided value if it's valid. */
+		if (irq >= 0)
 			gsi_override = irq;
 	}
 
@@ -447,41 +419,16 @@ static __init void xen_setup_acpi_sci(void)
 	return;
 }
 
-static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
-				 int trigger, int polarity)
+int __init pci_xen_initial_domain(void)
 {
-	return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
-}
+	int irq;
 
-static int __init pci_xen_initial_domain(void)
-{
 #ifdef CONFIG_PCI_MSI
 	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 #endif
 	xen_setup_acpi_sci();
 	__acpi_register_gsi = acpi_register_gsi_xen;
-
-	return 0;
-}
-
-void __init xen_setup_pirqs(void)
-{
-	int pirq, irq;
-
-	pci_xen_initial_domain();
-
-	if (0 == nr_ioapics) {
-		for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
-			pirq = xen_allocate_pirq_gsi(irq);
-			if (WARN(pirq < 0,
-				"Could not allocate PIRQ for legacy interrupt\n"))
-				break;
-			irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
-		}
-		return;
-	}
-
 	/* Pre-allocate legacy irqs */
 	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
 		int trigger, polarity;
@@ -490,12 +437,16 @@ void __init xen_setup_pirqs(void)
 			continue;
 
 		xen_register_pirq(irq, -1 /* no GSI override */,
-			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
+			true /* Map GSI to PIRQ */);
 	}
+
+	if (0 == nr_ioapics) {
+		for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
+	}
+
+	return 0;
 }
-#endif
 
-#ifdef CONFIG_XEN_DOM0
 struct xen_device_domain_owner {
 	domid_t domain;
 	struct pci_dev *dev;
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 17c565de3d6..a6575b949b1 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -18,5 +18,5 @@ obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
-
+obj-$(CONFIG_XEN_DOM0)		+= vga.o
 obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5525163a039..53257421082 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1248,6 +1248,14 @@ asmlinkage void __init xen_start_kernel(void)
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
 	} else {
+		const struct dom0_vga_console_info *info =
+			(void *)((char *)xen_start_info +
+				 xen_start_info->console.dom0.info_off);
+
+		xen_init_vga(info, xen_start_info->console.dom0.info_size);
+		xen_start_info->console.domU.mfn = 0;
+		xen_start_info->console.domU.evtchn = 0;
+
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 25c52f94a27..ffcf2615640 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
 #ifdef CONFIG_XEN_PVHVM
 static int xen_emul_unplug;
 
-static int __init check_platform_magic(void)
+static int check_platform_magic(void)
 {
 	short magic;
 	char protocol;
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
new file mode 100644
index 00000000000..1cd7f4d11e2
--- /dev/null
+++ b/arch/x86/xen/vga.c
@@ -0,0 +1,67 @@
+#include <linux/screen_info.h>
+#include <linux/init.h>
+
+#include <asm/bootparam.h>
+#include <asm/setup.h>
+
+#include <xen/interface/xen.h>
+
+#include "xen-ops.h"
+
+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
+{
+	struct screen_info *screen_info = &boot_params.screen_info;
+
+	/* This is drawn from a dump from vgacon:startup in
+	 * standard Linux. */
+	screen_info->orig_video_mode = 3;
+	screen_info->orig_video_isVGA = 1;
+	screen_info->orig_video_lines = 25;
+	screen_info->orig_video_cols = 80;
+	screen_info->orig_video_ega_bx = 3;
+	screen_info->orig_video_points = 16;
+	screen_info->orig_y = screen_info->orig_video_lines - 1;
+
+	switch (info->video_type) {
+	case XEN_VGATYPE_TEXT_MODE_3:
+		if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
+		    + sizeof(info->u.text_mode_3))
+			break;
+		screen_info->orig_video_lines = info->u.text_mode_3.rows;
+		screen_info->orig_video_cols = info->u.text_mode_3.columns;
+		screen_info->orig_x = info->u.text_mode_3.cursor_x;
+		screen_info->orig_y = info->u.text_mode_3.cursor_y;
+		screen_info->orig_video_points =
+			info->u.text_mode_3.font_height;
+		break;
+
+	case XEN_VGATYPE_VESA_LFB:
+		if (size < offsetof(struct dom0_vga_console_info,
+				    u.vesa_lfb.gbl_caps))
+			break;
+		screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB;
+		screen_info->lfb_width = info->u.vesa_lfb.width;
+		screen_info->lfb_height = info->u.vesa_lfb.height;
+		screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel;
+		screen_info->lfb_base = info->u.vesa_lfb.lfb_base;
+		screen_info->lfb_size = info->u.vesa_lfb.lfb_size;
+		screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line;
+		screen_info->red_size = info->u.vesa_lfb.red_size;
+		screen_info->red_pos = info->u.vesa_lfb.red_pos;
+		screen_info->green_size = info->u.vesa_lfb.green_size;
+		screen_info->green_pos = info->u.vesa_lfb.green_pos;
+		screen_info->blue_size = info->u.vesa_lfb.blue_size;
+		screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
+		screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
+		screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+		if (size >= offsetof(struct dom0_vga_console_info,
+				     u.vesa_lfb.gbl_caps)
+		    + sizeof(info->u.vesa_lfb.gbl_caps))
+			screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
+		if (size >= offsetof(struct dom0_vga_console_info,
+				     u.vesa_lfb.mode_attrs)
+		    + sizeof(info->u.vesa_lfb.mode_attrs))
+			screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
+		break;
+	}
+}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 97dfdc8757b..b095739ccd4 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu)
 }
 #endif
 
+struct dom0_vga_console_info;
+
+#ifdef CONFIG_XEN_DOM0
+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+#else
+static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+				       size_t size)
+{
+}
+#endif
+
 /* Declare an asm function, along with symbols needed to make it inlineable */
 #define DECL_ASM(ret, name, ...)		\