Diffstat (limited to 'drivers/spi')
45 files changed, 9418 insertions, 1622 deletions
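[Editorial aside, not part of the patch] The largest single change below is to amba-pl022.c, which gains DMA engine support, ST variant handling, and a reworked readwriter() that tracks the expected FIFO fill level in software (exp_fifo_level) instead of trusting the TX-FIFO-not-full flag, so the TX FIFO is never filled beyond what the RX FIFO can drain. A minimal standalone C sketch of that bookkeeping, with an assumed 8-entry FIFO and a made-up transfer length:

/*
 * Editorial sketch only: simulates the exp_fifo_level invariant used by
 * the reworked readwriter() in the amba-pl022.c hunks below.  FIFODEPTH
 * and the transfer length are made-up values, not from the patch.
 */
#include <stdio.h>

#define FIFODEPTH 8

int main(void)
{
        int exp_fifo_level = 0;         /* words believed to be in flight */
        int tx = 0, rx = 0, len = 20;

        while (tx < len || rx < len) {
                /* drain whatever the peripheral has echoed back */
                while (exp_fifo_level > 0 && rx < len) {
                        rx++;
                        exp_fifo_level--;
                }
                /* write only while the expected level stays below the depth */
                while (exp_fifo_level < FIFODEPTH && tx < len) {
                        tx++;
                        exp_fifo_level++;
                }
        }
        printf("tx=%d rx=%d in-flight=%d\n", tx, rx, exp_fifo_level);
        return 0;
}

In the driver itself the same counter is incremented on every word written and decremented on every word read, so the RX FIFO can never be overrun regardless of what the TX status flag reports.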
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f55eb010733..78f9fd02c1b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -100,6 +100,33 @@ config SPI_BUTTERFLY inexpensive battery powered microcontroller evaluation board. This same cable can be used to flash new firmware. +config SPI_COLDFIRE_QSPI + tristate "Freescale Coldfire QSPI controller" + depends on (M520x || M523x || M5249 || M527x || M528x || M532x) + help + This enables support for the Coldfire QSPI controller in master + mode. + + This driver can also be built as a module. If so, the module + will be called coldfire_qspi. + +config SPI_DAVINCI + tristate "SPI controller driver for DaVinci/DA8xx SoC's" + depends on SPI_MASTER && ARCH_DAVINCI + select SPI_BITBANG + help + SPI master controller for DaVinci and DA8xx SPI modules. + +config SPI_EP93XX + tristate "Cirrus Logic EP93xx SPI controller" + depends on ARCH_EP93XX + help + This enables using the Cirrus EP93xx SPI controller in master + mode. + + To compile this driver as a module, choose M here. The module will be + called ep93xx_spi. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GENERIC_GPIO @@ -116,10 +143,26 @@ config SPI_GPIO GPIO operations, you should be able to leverage that for better speed with a custom version of this driver; see the source code. +config SPI_IMX_VER_IMX1 + def_bool y if SOC_IMX1 + +config SPI_IMX_VER_0_0 + def_bool y if SOC_IMX21 || SOC_IMX27 + +config SPI_IMX_VER_0_4 + def_bool y if ARCH_MX31 + +config SPI_IMX_VER_0_7 + def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 + +config SPI_IMX_VER_2_3 + def_bool y if ARCH_MX51 + config SPI_IMX tristate "Freescale i.MX SPI controllers" depends on ARCH_MXC select SPI_BITBANG + default m if IMX_HAVE_PLATFORM_SPI_IMX help This enables using the Freescale i.MX SPI controllers in master mode. @@ -148,12 +191,34 @@ config SPI_MPC52xx_PSC This enables using the Freescale MPC52xx Programmable Serial Controller in master SPI mode. -config SPI_MPC8xxx - tristate "Freescale MPC8xxx SPI controller" +config SPI_MPC512x_PSC + tristate "Freescale MPC512x PSC SPI controller" + depends on SPI_MASTER && PPC_MPC512x + help + This enables using the Freescale MPC5121 Programmable Serial + Controller in SPI master mode. + +config SPI_FSL_LIB + tristate depends on FSL_SOC + +config SPI_FSL_SPI + tristate "Freescale SPI controller" + depends on FSL_SOC + select SPI_FSL_LIB help - This enables using the Freescale MPC8xxx SPI controllers in master - mode. + This enables using the Freescale SPI controllers in master mode. + MPC83xx platform uses the controller in cpu mode or CPM/QE mode. + MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. + +config SPI_FSL_ESPI + tristate "Freescale eSPI controller" + depends on FSL_SOC + select SPI_FSL_LIB + help + This enables using the Freescale eSPI controllers in master mode. + From MPC8536, 85xx platform uses the controller, and all P10xx, + P20xx, P30xx,P40xx, P50xx uses this controller. config SPI_OMAP_UWIRE tristate "OMAP1 MicroWire" @@ -163,10 +228,10 @@ config SPI_OMAP_UWIRE This hooks up to the MicroWire controller on OMAP1 chips. config SPI_OMAP24XX - tristate "McSPI driver for OMAP24xx/OMAP34xx" - depends on ARCH_OMAP24XX || ARCH_OMAP34XX + tristate "McSPI driver for OMAP" + depends on ARCH_OMAP2PLUS help - SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI + SPI master controller for OMAP24XX and later Multichannel SPI (McSPI) modules. 
config SPI_OMAP_100K @@ -264,6 +329,20 @@ config SPI_STMP3XXX help SPI driver for Freescale STMP37xx/378x SoC SSP interface +config SPI_TEGRA + tristate "Nvidia Tegra SPI controller" + depends on ARCH_TEGRA + select TEGRA_SYSTEM_DMA + help + SPI driver for NVidia Tegra SoCs + +config SPI_TOPCLIFF_PCH + tristate "Topcliff PCH SPI Controller" + depends on PCI + help + SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus + used in some x86 embedded processors. + config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" depends on GENERIC_GPIO && CPU_TX49XX @@ -308,7 +387,7 @@ config SPI_NUC900 # config SPI_DESIGNWARE - bool "DesignWare SPI controller core support" + tristate "DesignWare SPI controller core support" depends on SPI_MASTER help general driver for SPI controller core from DesignWare @@ -317,6 +396,10 @@ config SPI_DW_PCI tristate "PCI interface driver for DW SPI core" depends on SPI_DESIGNWARE && PCI +config SPI_DW_MMIO + tristate "Memory-mapped io interface driver for DW SPI core" + depends on SPI_DESIGNWARE && HAVE_CLK + # # There are lots of SPI device types, with sensors and memory # being probably the most widely used ones. diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f3d2810ba11..8bc1a5abac1 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -2,9 +2,7 @@ # Makefile for kernel SPI drivers. # -ifeq ($(CONFIG_SPI_DEBUG),y) -EXTRA_CFLAGS += -DDEBUG -endif +ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG # small core, mostly translating board-specific # config declarations into driver model code @@ -16,8 +14,12 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_AU1550) += au1550_spi.o obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o +obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o +obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o +obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o +obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o obj-$(CONFIG_SPI_GPIO) += spi_gpio.o obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o @@ -27,13 +29,18 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o obj-$(CONFIG_SPI_ORION) += orion_spi.o obj-$(CONFIG_SPI_PL022) += amba-pl022.o +obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o -obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o +obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o +obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o +obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o +obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o +obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index ff5bbb9c43c..fb3d1b31772 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -27,7 +27,6 @@ /* * TODO: * - add timeout on polled transfers - * - add generic DMA framework support */ #include <linux/init.h> @@ -44,6 +43,10 @@ #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/io.h> +#include <linux/slab.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include 
<linux/scatterlist.h> /* * This macro is used to define some register default values. @@ -101,13 +104,21 @@ /* * SSP Control Register 0 - SSP_CR0 */ -#define SSP_CR0_MASK_DSS (0x1FUL << 0) -#define SSP_CR0_MASK_HALFDUP (0x1UL << 5) +#define SSP_CR0_MASK_DSS (0x0FUL << 0) +#define SSP_CR0_MASK_FRF (0x3UL << 4) #define SSP_CR0_MASK_SPO (0x1UL << 6) #define SSP_CR0_MASK_SPH (0x1UL << 7) #define SSP_CR0_MASK_SCR (0xFFUL << 8) -#define SSP_CR0_MASK_CSS (0x1FUL << 16) -#define SSP_CR0_MASK_FRF (0x3UL << 21) + +/* + * The ST version of this block moves som bits + * in SSP_CR0 and extends it to 32 bits + */ +#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) +#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) +#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) +#define SSP_CR0_MASK_FRF_ST (0x3UL << 21) + /* * SSP Control Register 0 - SSP_CR1 @@ -116,16 +127,18 @@ #define SSP_CR1_MASK_SSE (0x1UL << 1) #define SSP_CR1_MASK_MS (0x1UL << 2) #define SSP_CR1_MASK_SOD (0x1UL << 3) -#define SSP_CR1_MASK_RENDN (0x1UL << 4) -#define SSP_CR1_MASK_TENDN (0x1UL << 5) -#define SSP_CR1_MASK_MWAIT (0x1UL << 6) -#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7) -#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10) /* - * SSP Data Register - SSP_DR + * The ST version of this block adds some bits + * in SSP_CR1 */ -#define SSP_DR_MASK_DATA 0xFFFFFFFF +#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) +#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) +#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) +#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) +#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) +/* This one is only in the PL023 variant */ +#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) /* * SSP Status Register - SSP_SR @@ -133,7 +146,7 @@ #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ -#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ +#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ /* @@ -226,7 +239,7 @@ /* * SSP Test Data Register - SSP_TDR */ -#define TDR_MASK_TESTDATA (0xFFFFFFFF) +#define TDR_MASK_TESTDATA (0xFFFFFFFF) /* * Message State @@ -234,33 +247,33 @@ * hold a single state value, that's why all this * (void *) casting is done here. 
*/ -#define STATE_START ((void *) 0) -#define STATE_RUNNING ((void *) 1) -#define STATE_DONE ((void *) 2) -#define STATE_ERROR ((void *) -1) +#define STATE_START ((void *) 0) +#define STATE_RUNNING ((void *) 1) +#define STATE_DONE ((void *) 2) +#define STATE_ERROR ((void *) -1) /* * Queue State */ -#define QUEUE_RUNNING (0) -#define QUEUE_STOPPED (1) +#define QUEUE_RUNNING (0) +#define QUEUE_STOPPED (1) /* * SSP State - Whether Enabled or Disabled */ -#define SSP_DISABLED (0) -#define SSP_ENABLED (1) +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) /* * SSP DMA State - Whether DMA Enabled or Disabled */ -#define SSP_DMA_DISABLED (0) -#define SSP_DMA_ENABLED (1) +#define SSP_DMA_DISABLED (0) +#define SSP_DMA_ENABLED (1) /* * SSP Clock Defaults */ -#define NMDK_SSP_DEFAULT_CLKRATE 0x2 -#define NMDK_SSP_DEFAULT_PRESCALE 0x40 +#define SSP_DEFAULT_CLKRATE 0x2 +#define SSP_DEFAULT_PRESCALE 0x40 /* * SSP Clock Parameter ranges @@ -306,16 +319,22 @@ enum ssp_writing { * @fifodepth: depth of FIFOs (both) * @max_bpw: maximum number of bits per word * @unidir: supports unidirection transfers + * @extended_cr: 32 bit wide control register 0 with extra + * features and extra features in CR1 as found in the ST variants + * @pl023: supports a subset of the ST extensions called "PL023" */ struct vendor_data { int fifodepth; int max_bpw; bool unidir; + bool extended_cr; + bool pl023; }; /** * struct pl022 - This is the private SSP driver data structure * @adev: AMBA device model hookup + * @vendor: Vendor data for the IP block * @phybase: The physical memory where the SSP device resides * @virtbase: The virtual memory where the SSP is mapped * @master: SPI framework hookup @@ -363,11 +382,21 @@ struct pl022 { void *rx_end; enum ssp_reading read; enum ssp_writing write; + u32 exp_fifo_level; + /* DMA settings */ +#ifdef CONFIG_DMA_ENGINE + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + struct sg_table sgt_rx; + struct sg_table sgt_tx; + char *dummypage; +#endif }; /** * struct chip_data - To maintain runtime state of SSP for each client chip - * @cr0: Value of control register CR0 of SSP + * @cr0: Value of control register CR0 of SSP - on later ST variants this + * register is 32 bits wide rather than just 16 * @cr1: Value of control register CR1 of SSP * @dmacr: Value of DMA control Register of SSP * @cpsr: Value of Clock prescale register @@ -382,12 +411,12 @@ struct pl022 { * This would be set according to the current message that would be served */ struct chip_data { - u16 cr0; + u32 cr0; u16 cr1; u16 dmacr; u16 cpsr; u8 n_bytes; - u8 enable_dma:1; + bool enable_dma; enum ssp_reading read; enum ssp_writing write; void (*cs_control) (u32 command); @@ -484,8 +513,9 @@ static void giveback(struct pl022 *pl022) msg->state = NULL; if (msg->complete) msg->complete(msg->context); - /* This message is completed, so let's turn off the clock! */ + /* This message is completed, so let's turn off the clocks! 
*/ clk_disable(pl022->clk); + amba_pclk_disable(pl022->adev); } /** @@ -501,6 +531,9 @@ static int flush(struct pl022 *pl022) while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) readw(SSP_DR(pl022->virtbase)); } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); + + pl022->exp_fifo_level = 0; + return limit; } @@ -512,7 +545,10 @@ static void restore_state(struct pl022 *pl022) { struct chip_data *chip = pl022->cur_chip; - writew(chip->cr0, SSP_CR0(pl022->virtbase)); + if (pl022->vendor->extended_cr) + writel(chip->cr0, SSP_CR0(pl022->virtbase)); + else + writew(chip->cr0, SSP_CR0(pl022->virtbase)); writew(chip->cr1, SSP_CR1(pl022->virtbase)); writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); @@ -520,38 +556,70 @@ static void restore_state(struct pl022 *pl022) writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } -/** - * load_ssp_default_config - Load default configuration for SSP - * @pl022: SSP driver private data structure - */ - /* * Default SSP Register Values */ #define DEFAULT_SSP_REG_CR0 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ - GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +/* ST versions have slightly different bit layout */ +#define DEFAULT_SSP_REG_CR0_ST ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ - GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ - GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ + GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ +) + +/* The PL023 version is slightly different again */ +#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) #define DEFAULT_SSP_REG_CR1 ( \ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ +) + +/* ST versions extend this register to use all 16 bits */ +#define DEFAULT_SSP_REG_CR1_ST ( \ + DEFAULT_SSP_REG_CR1 | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ +) + +/* + * The PL023 variant has further differences: no loopback mode, no microwire + * support, and a new clock feedback delay setting. 
+ */ +#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ - GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \ - GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \ - GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\ - GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \ - GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ + GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ ) #define DEFAULT_SSP_REG_CPSR ( \ - GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ + GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ ) #define DEFAULT_SSP_REG_DMACR (\ @@ -559,11 +627,22 @@ static void restore_state(struct pl022 *pl022) GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ ) - +/** + * load_ssp_default_config - Load default configuration for SSP + * @pl022: SSP driver private data structure + */ static void load_ssp_default_config(struct pl022 *pl022) { - writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + if (pl022->vendor->pl023) { + writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); + } else if (pl022->vendor->extended_cr) { + writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); + } else { + writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + } writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); @@ -583,10 +662,9 @@ static void readwriter(struct pl022 *pl022) * errons in 8bit wide transfers on ARM variants (just 8 words * FIFO, means only 8x8 = 64 bits in FIFO) at least. * - * FIXME: currently we have no logic to account for this. - * perhaps there is even something broken in HW regarding - * 8bit transfers (it doesn't fail on 16bit) so this needs - * more investigation... + * To prevent this issue, the TX FIFO is only filled to the + * unused RX FIFO fill length, regardless of what the TX + * FIFO status flag indicates. */ dev_dbg(&pl022->adev->dev, "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", @@ -613,11 +691,12 @@ static void readwriter(struct pl022 *pl022) break; } pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; } /* - * Write as much as you can, while keeping an eye on the RX FIFO! + * Write as much as possible up to the RX FIFO size */ - while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) + while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) && (pl022->tx < pl022->tx_end)) { switch (pl022->write) { case WRITING_NULL: @@ -634,6 +713,7 @@ static void readwriter(struct pl022 *pl022) break; } pl022->tx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level++; /* * This inner reader takes care of things appearing in the RX * FIFO as we're transmitting. 
This will happen a lot since the @@ -660,6 +740,7 @@ static void readwriter(struct pl022 *pl022) break; } pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; } } /* @@ -692,6 +773,371 @@ static void *next_transfer(struct pl022 *pl022) } return STATE_DONE; } + +/* + * This DMA functionality is only compiled in if we have + * access to the generic DMA devices/DMA engine. + */ +#ifdef CONFIG_DMA_ENGINE +static void unmap_free_dma_scatter(struct pl022 *pl022) +{ + /* Unmap and free the SG tables */ + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + sg_free_table(&pl022->sgt_rx); + sg_free_table(&pl022->sgt_tx); +} + +static void dma_callback(void *data) +{ + struct pl022 *pl022 = data; + struct spi_message *msg = pl022->cur_msg; + + BUG_ON(!pl022->sgt_rx.sgl); + +#ifdef VERBOSE_DEBUG + /* + * Optionally dump out buffers to inspect contents, this is + * good if you want to convince yourself that the loopback + * read/write contents are the same, when adopting to a new + * DMA engine. + */ + { + struct scatterlist *sg; + unsigned int i; + + dma_sync_sg_for_cpu(&pl022->adev->dev, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE); + + for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI RX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI TX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + } +#endif + + unmap_free_dma_scatter(pl022); + + /* Update total bytes transfered */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); +} + +static void setup_dma_scatter(struct pl022 *pl022, + void *buffer, + unsigned int length, + struct sg_table *sgtab) +{ + struct scatterlist *sg; + int bytesleft = length; + void *bufp = buffer; + int mapbytes; + int i; + + if (buffer) { + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + /* + * If there are less bytes left than what fits + * in the current page (plus page alignment offset) + * we just feed in this, else we stuff in as much + * as we can. 
+ */ + if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE - offset_in_page(bufp); + sg_set_page(sg, virt_to_page(bufp), + mapbytes, offset_in_page(bufp)); + bufp += mapbytes; + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX target page @ %p, %d bytes, %d left\n", + bufp, mapbytes, bytesleft); + } + } else { + /* Map the dummy buffer on every page */ + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + if (bytesleft < PAGE_SIZE) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE; + sg_set_page(sg, virt_to_page(pl022->dummypage), + mapbytes, 0); + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX to dummy page %d bytes, %d left\n", + mapbytes, bytesleft); + + } + } + BUG_ON(bytesleft); +} + +/** + * configure_dma - configures the channels for the next transfer + * @pl022: SSP driver's private data structure + */ +static int configure_dma(struct pl022 *pl022) +{ + struct dma_slave_config rx_conf = { + .src_addr = SSP_DR(pl022->phybase), + .direction = DMA_FROM_DEVICE, + .src_maxburst = pl022->vendor->fifodepth >> 1, + }; + struct dma_slave_config tx_conf = { + .dst_addr = SSP_DR(pl022->phybase), + .direction = DMA_TO_DEVICE, + .dst_maxburst = pl022->vendor->fifodepth >> 1, + }; + unsigned int pages; + int ret; + int sglen; + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + struct dma_async_tx_descriptor *rxdesc; + struct dma_async_tx_descriptor *txdesc; + dma_cookie_t cookie; + + /* Check that the channels are available */ + if (!rxchan || !txchan) + return -ENODEV; + + switch (pl022->read) { + case READING_NULL: + /* Use the same as for writing */ + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case READING_U8: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case READING_U16: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case READING_U32: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + switch (pl022->write) { + case WRITING_NULL: + /* Use the same as for reading */ + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case WRITING_U8: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case WRITING_U16: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case WRITING_U32: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;; + break; + } + + /* SPI pecularity: we need to read and write the same width */ + if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + rx_conf.src_addr_width = tx_conf.dst_addr_width; + if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + tx_conf.dst_addr_width = rx_conf.src_addr_width; + BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); + + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, + (unsigned long) &rx_conf); + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, + (unsigned long) &tx_conf); + + /* Create sglists for the transfers */ + pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; + dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); + + ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_rx_sg; + + ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_tx_sg; + + /* Fill in the scatterlists for the RX+TX buffers */ + setup_dma_scatter(pl022, pl022->rx, + pl022->cur_transfer->len, &pl022->sgt_rx); + setup_dma_scatter(pl022, pl022->tx, + pl022->cur_transfer->len, &pl022->sgt_tx); 
+ + /* Map DMA buffers */ + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + if (!sglen) + goto err_rx_sgmap; + + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + if (!sglen) + goto err_tx_sgmap; + + /* Send both scatterlists */ + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto err_rxdesc; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto err_txdesc; + + /* Put the callback on the RX transfer only, that should finish last */ + rxdesc->callback = dma_callback; + rxdesc->callback_param = pl022; + + /* Submit and fire RX and TX with TX last so we're ready to read! */ + cookie = rxdesc->tx_submit(rxdesc); + if (dma_submit_error(cookie)) + goto err_submit_rx; + cookie = txdesc->tx_submit(txdesc); + if (dma_submit_error(cookie)) + goto err_submit_tx; + rxchan->device->device_issue_pending(rxchan); + txchan->device->device_issue_pending(txchan); + + return 0; + +err_submit_tx: +err_submit_rx: +err_txdesc: + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); +err_rxdesc: + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); +err_tx_sgmap: + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_tx.nents, DMA_FROM_DEVICE); +err_rx_sgmap: + sg_free_table(&pl022->sgt_tx); +err_alloc_tx_sg: + sg_free_table(&pl022->sgt_rx); +err_alloc_rx_sg: + return -ENOMEM; +} + +static int __init pl022_dma_probe(struct pl022 *pl022) +{ + dma_cap_mask_t mask; + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + /* + * We need both RX and TX channels to do DMA, else do none + * of them. 
+ */ + pl022->dma_rx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_rx_param); + if (!pl022->dma_rx_channel) { + dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); + goto err_no_rxchan; + } + + pl022->dma_tx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_tx_param); + if (!pl022->dma_tx_channel) { + dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); + goto err_no_txchan; + } + + pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pl022->dummypage) { + dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); + goto err_no_dummypage; + } + + dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", + dma_chan_name(pl022->dma_rx_channel), + dma_chan_name(pl022->dma_tx_channel)); + + return 0; + +err_no_dummypage: + dma_release_channel(pl022->dma_tx_channel); +err_no_txchan: + dma_release_channel(pl022->dma_rx_channel); + pl022->dma_rx_channel = NULL; +err_no_rxchan: + return -ENODEV; +} + +static void terminate_dma(struct pl022 *pl022) +{ + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); + unmap_free_dma_scatter(pl022); +} + +static void pl022_dma_remove(struct pl022 *pl022) +{ + if (pl022->busy) + terminate_dma(pl022); + if (pl022->dma_tx_channel) + dma_release_channel(pl022->dma_tx_channel); + if (pl022->dma_rx_channel) + dma_release_channel(pl022->dma_rx_channel); + kfree(pl022->dummypage); +} + +#else +static inline int configure_dma(struct pl022 *pl022) +{ + return -ENODEV; +} + +static inline int pl022_dma_probe(struct pl022 *pl022) +{ + return 0; +} + +static inline void pl022_dma_remove(struct pl022 *pl022) +{ +} +#endif + /** * pl022_interrupt_handler - Interrupt handler for SSP controller * @@ -723,14 +1169,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) if (unlikely(!irq_status)) return IRQ_NONE; - /* This handles the error code interrupts */ + /* + * This handles the FIFO interrupts, the timeout + * interrupts are flatly ignored, they cannot be + * trusted. + */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ - dev_err(&pl022->adev->dev, - "FIFO overrun\n"); + dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); @@ -825,8 +1274,8 @@ static int set_up_next_transfer(struct pl022 *pl022, } /** - * pump_transfers - Tasklet function which schedules next interrupt transfer - * when running in interrupt transfer mode. + * pump_transfers - Tasklet function which schedules next transfer + * when running in interrupt or DMA transfer mode. * @data: SSP driver private data structure * */ @@ -883,65 +1332,23 @@ static void pump_transfers(unsigned long data) } /* Flush the FIFOs and let's go! */ flush(pl022); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); -} -/** - * NOT IMPLEMENTED - * configure_dma - It configures the DMA pipes for DMA transfers - * @data: SSP driver's private data structure - * - */ -static int configure_dma(void *data) -{ - struct pl022 *pl022 = data; - dev_dbg(&pl022->adev->dev, "configure DMA\n"); - return -ENOTSUPP; -} - -/** - * do_dma_transfer - It handles transfers of the current message - * if it is DMA xfer. 
- * NOT FULLY IMPLEMENTED - * @data: SSP driver's private data structure - */ -static void do_dma_transfer(void *data) -{ - struct pl022 *pl022 = data; - - if (configure_dma(data)) { - dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); - goto err_config_dma; - } - - /* TODO: Implememt DMA setup of pipes here */ - - /* Enable target chip, set up transfer */ - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { - /* Error path */ - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); + if (pl022->cur_chip->enable_dma) { + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } return; } - /* Enable SSP */ - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - /* TODO: Enable the DMA transfer here */ - return; - - err_config_dma: - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); - return; +err_config_dma: + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); } -static void do_interrupt_transfer(void *data) +static void do_interrupt_dma_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; + u32 irqflags = ENABLE_ALL_INTERRUPTS; /* Enable target chip */ pl022->cur_chip->cs_control(SSP_CHIP_SELECT); @@ -952,15 +1359,26 @@ static void do_interrupt_transfer(void *data) giveback(pl022); return; } + /* If we're using DMA, set up DMA here */ + if (pl022->cur_chip->enable_dma) { + /* Configure DMA transfer */ + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } + /* Disable interrupts in DMA mode, IRQ from DMA controller */ + irqflags = DISABLE_ALL_INTERRUPTS; + } +err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(irqflags, SSP_IMSC(pl022->virtbase)); } -static void do_polling_transfer(void *data) +static void do_polling_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; @@ -1001,7 +1419,7 @@ static void do_polling_transfer(void *data) writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); - dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... 
\n"); + dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); /* FIXME: insert a timeout so we don't hang here indefinately */ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) readwriter(pl022); @@ -1030,7 +1448,7 @@ static void do_polling_transfer(void *data) * * This function checks if there is any spi message in the queue that * needs processing and delegate control to appropriate function - * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() + * do_polling_transfer()/do_interrupt_dma_transfer() * based on the kind of the transfer * */ @@ -1069,19 +1487,18 @@ static void pump_messages(struct work_struct *work) /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); /* - * We enable the clock here, then the clock will be disabled when + * We enable the clocks here, then the clocks will be disabled when * giveback() is called in each method (poll/interrupt/DMA) */ + amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); - else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) - do_interrupt_transfer(pl022); else - do_dma_transfer(pl022); + do_interrupt_dma_transfer(pl022); } @@ -1141,7 +1558,6 @@ static int stop_queue(struct pl022 *pl022) * A wait_queue on the pl022->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ - pl022->run = QUEUE_STOPPED; while (!list_empty(&pl022->queue) && pl022->busy && limit--) { spin_unlock_irqrestore(&pl022->queue_lock, flags); msleep(10); @@ -1150,6 +1566,7 @@ static int stop_queue(struct pl022 *pl022) if (!list_empty(&pl022->queue) || pl022->busy) status = -EBUSY; + else pl022->run = QUEUE_STOPPED; spin_unlock_irqrestore(&pl022->queue_lock, flags); @@ -1176,116 +1593,78 @@ static int destroy_queue(struct pl022 *pl022) } static int verify_controller_parameters(struct pl022 *pl022, - struct pl022_config_chip *chip_info) + struct pl022_config_chip const *chip_info) { - if ((chip_info->lbm != LOOPBACK_ENABLED) - && (chip_info->lbm != LOOPBACK_DISABLED)) { - dev_err(chip_info->dev, - "loopback Mode is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } - if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) - || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { - dev_err(chip_info->dev, - "cpsdvsr is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_rx != SSP_RX_MSB) - && (chip_info->endian_rx != SSP_RX_LSB)) { - dev_err(chip_info->dev, - "RX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_tx != SSP_TX_MSB) - && (chip_info->endian_tx != SSP_TX_LSB)) { - dev_err(chip_info->dev, - "TX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - 
if ((chip_info->data_size < SSP_DATA_BITS_4) - || (chip_info->data_size > SSP_DATA_BITS_32)) { - dev_err(chip_info->dev, - "DATA Size is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } - if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { - if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) - && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { - dev_err(chip_info->dev, - "Clock Phase is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) - && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { - dev_err(chip_info->dev, - "Clock Polarity is configured incorrectly\n"); - return -EINVAL; - } - } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } - if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) - && (chip_info->duplex != - SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { - dev_err(chip_info->dev, - "DUPLEX is configured incorrectly\n"); + /* Half duplex is only available in the ST Micro version */ + if (pl022->vendor->extended_cr) { + if ((chip_info->duplex != + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + && (chip_info->duplex != + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { + dev_err(&pl022->adev->dev, + "Microwire duplex mode is configured incorrectly\n"); + return -EINVAL; + } + } else { + if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + dev_err(&pl022->adev->dev, + "Microwire half duplex mode requested," + " but this is only available in the" + " ST version of PL022\n"); return -EINVAL; } } - if (chip_info->cs_control == NULL) { - dev_warn(chip_info->dev, - "Chip Select Function is NULL for this chip\n"); - chip_info->cs_control = null_cs_control; - } return 0; } @@ -1385,22 +1764,24 @@ static int calculate_effective_freq(struct pl022 *pl022, return 0; } -/** - * NOT IMPLEMENTED - * process_dma_info - Processes the DMA info provided by client drivers - * @chip_info: chip info provided by client device - * @chip: Runtime state maintained by the SSP controller for each spi device - * - * This function processes and stores DMA config provided by client driver - * into the runtime state maintained by the SSP controller driver + +/* + * A piece of default chip info unless the platform + * supplies it. 
*/ -static int process_dma_info(struct pl022_config_chip *chip_info, - struct chip_data *chip) -{ - dev_err(chip_info->dev, - "cannot process DMA info, DMA not implemented!\n"); - return -ENOTSUPP; -} +static const struct pl022_config_chip pl022_default_chip_info = { + .com_mode = POLLING_TRANSFER, + .iface = SSP_INTERFACE_MOTOROLA_SPI, + .hierarchy = SSP_SLAVE, + .slave_tx_disable = DO_NOT_DRIVE_TX, + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .ctrl_len = SSP_BITS_8, + .wait_state = SSP_MWIRE_WAIT_ZERO, + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + .cs_control = null_cs_control, +}; + /** * pl022_setup - setup function registered to SPI master framework @@ -1414,23 +1795,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info, * controller hardware here, that is not done until the actual transfer * commence. */ - -/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_LOOP) - static int pl022_setup(struct spi_device *spi) { - struct pl022_config_chip *chip_info; + struct pl022_config_chip const *chip_info; struct chip_data *chip; + struct ssp_clock_params clk_freq; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); - - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } + unsigned int bits = spi->bits_per_word; + u32 tmp; if (!spi->max_speed_hz) return -EINVAL; @@ -1453,48 +1826,13 @@ static int pl022_setup(struct spi_device *spi) chip_info = spi->controller_data; if (chip_info == NULL) { + chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); - - chip_info = - kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); - - if (!chip_info) { - dev_err(&spi->dev, - "cannot allocate controller data\n"); - status = -ENOMEM; - goto err_first_setup; - } - - dev_dbg(&spi->dev, "allocated memory for controller data\n"); - - /* Pointer back to the SPI device */ - chip_info->dev = &spi->dev; - /* - * Set controller data default values: - * Polling is supported by default - */ - chip_info->lbm = LOOPBACK_DISABLED; - chip_info->com_mode = POLLING_TRANSFER; - chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; - chip_info->hierarchy = SSP_SLAVE; - chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; - chip_info->endian_tx = SSP_TX_LSB; - chip_info->endian_rx = SSP_RX_LSB; - chip_info->data_size = SSP_DATA_BITS_12; - chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; - chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; - chip_info->clk_phase = SSP_CLK_SECOND_EDGE; - chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; - chip_info->ctrl_len = SSP_BITS_8; - chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; - chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; - chip_info->cs_control = null_cs_control; - } else { + } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); - } /* * We can override with custom divisors, else we use the board @@ -1504,29 +1842,48 @@ static int pl022_setup(struct spi_device *spi) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, - &chip_info->clk_freq); + &clk_freq); if (status < 0) goto err_config_params; } else { - if ((chip_info->clk_freq.cpsdvsr % 2) != 0) - chip_info->clk_freq.cpsdvsr = - chip_info->clk_freq.cpsdvsr - 1; + memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); + 
if ((clk_freq.cpsdvsr % 2) != 0) + clk_freq.cpsdvsr = + clk_freq.cpsdvsr - 1; } + if ((clk_freq.cpsdvsr < CPSDVR_MIN) + || (clk_freq.cpsdvsr > CPSDVR_MAX)) { + dev_err(&spi->dev, + "cpsdvsr is configured incorrectly\n"); + goto err_config_params; + } + + status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } + /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; - chip->cs_control = chip_info->cs_control; - - if (chip_info->data_size <= 8) { - dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); + if (!chip_info->cs_control) { + chip->cs_control = null_cs_control; + dev_warn(&spi->dev, + "chip select function is NULL for this chip\n"); + } else + chip->cs_control = chip_info->cs_control; + + if (bits <= 3) { + /* PL022 doesn't support less than 4-bits */ + status = -ENOTSUPP; + goto err_config_params; + } else if (bits <= 8) { + dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; - } else if (chip_info->data_size <= 16) { + } else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; @@ -1543,6 +1900,7 @@ static int pl022_setup(struct spi_device *spi) dev_err(&spi->dev, "a standard pl022 can only handle " "1 <= n <= 16 bit words\n"); + status = -ENOTSUPP; goto err_config_params; } } @@ -1554,9 +1912,8 @@ static int pl022_setup(struct spi_device *spi) chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { - chip->enable_dma = 1; + chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); - status = process_dma_info(chip_info, chip); if (status < 0) goto err_config_params; SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, @@ -1564,7 +1921,7 @@ static int pl022_setup(struct spi_device *spi) SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { - chip->enable_dma = 0; + chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); @@ -1572,30 +1929,81 @@ static int pl022_setup(struct spi_device *spi) SSP_DMACR_MASK_TXDMAE, 1); } - chip->cpsr = chip_info->clk_freq.cpsdvsr; + chip->cpsr = clk_freq.cpsdvsr; + + /* Special setup for the ST micro extended control registers */ + if (pl022->vendor->extended_cr) { + u32 etx; + + if (pl022->vendor->pl023) { + /* These bits are only in the PL023 */ + SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, + SSP_CR1_MASK_FBCLKDEL_ST, 13); + } else { + /* These bits are in the PL022 but not PL023 */ + SSP_WRITE_BITS(chip->cr0, chip_info->duplex, + SSP_CR0_MASK_HALFDUP_ST, 5); + SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, + SSP_CR0_MASK_CSS_ST, 16); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF_ST, 21); + SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, + SSP_CR1_MASK_MWAIT_ST, 6); + } + SSP_WRITE_BITS(chip->cr0, bits - 1, + SSP_CR0_MASK_DSS_ST, 0); + + if (spi->mode & SPI_LSB_FIRST) { + tmp = SSP_RX_LSB; + etx = SSP_TX_LSB; + } else { + tmp = SSP_RX_MSB; + etx = SSP_TX_MSB; + } + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); + SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); + SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, + SSP_CR1_MASK_RXIFLSEL_ST, 7); + SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, + SSP_CR1_MASK_TXIFLSEL_ST, 10); + } else { + SSP_WRITE_BITS(chip->cr0, 
bits - 1, + SSP_CR0_MASK_DSS, 0); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF, 4); + } + + /* Stuff that is common for all versions */ + if (spi->mode & SPI_CPOL) + tmp = SSP_CLK_POL_IDLE_HIGH; + else + tmp = SSP_CLK_POL_IDLE_LOW; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); - SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0); - SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); - SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16); - SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21); - SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); + if (spi->mode & SPI_CPHA) + tmp = SSP_CLK_SECOND_EDGE; + else + tmp = SSP_CLK_FIRST_EDGE; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); + + SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); + /* Loopback is available on all versions except PL023 */ + if (!pl022->vendor->pl023) { + if (spi->mode & SPI_LOOP) + tmp = LOOPBACK_ENABLED; + else + tmp = LOOPBACK_DISABLED; + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); + } SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5); - SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6); - SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7); - SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10); /* Save controller_state */ spi_set_ctldata(spi, chip); return status; err_config_params: - err_first_setup: + spi_set_ctldata(spi, NULL); kfree(chip); return status; } @@ -1616,7 +2024,7 @@ static void pl022_cleanup(struct spi_device *spi) } -static int __init +static int __devinit pl022_probe(struct amba_device *adev, struct amba_id *id) { struct device *dev = &adev->dev; @@ -1657,12 +2065,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) master->setup = pl022_setup; master->transfer = pl022_transfer; + /* + * Supports mode 0-3, loopback, and active low CS. Transfers are + * always MS bit first on the original pl022. 
+ */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + if (pl022->vendor->extended_cr) + master->mode_bits |= SPI_LSB_FIRST; + dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); status = amba_request_regions(adev, NULL); if (status) goto err_no_ioregion; + pl022->phybase = adev->res.start; pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (pl022->virtbase == NULL) { status = -ENOMEM; @@ -1679,11 +2096,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) } /* Disable SSP */ - clk_enable(pl022->clk); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); load_ssp_default_config(pl022); - clk_disable(pl022->clk); status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", pl022); @@ -1691,6 +2106,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); goto err_no_irq; } + + /* Get DMA channels */ + if (platform_info->enable_dma) { + status = pl022_dma_probe(pl022); + if (status != 0) + goto err_no_dma; + } + /* Initialize and start queue */ status = init_queue(pl022); if (status != 0) { @@ -1711,12 +2134,16 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) goto err_spi_register; } dev_dbg(dev, "probe succeded\n"); + /* Disable the silicon block pclk and clock it when needed */ + amba_pclk_disable(adev); return 0; err_spi_register: err_start_queue: err_init_queue: destroy_queue(pl022); + pl022_dma_remove(pl022); + err_no_dma: free_irq(adev->irq[0], pl022); err_no_irq: clk_put(pl022->clk); @@ -1731,7 +2158,7 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) return status; } -static int __exit +static int __devexit pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); @@ -1747,6 +2174,7 @@ pl022_remove(struct amba_device *adev) return status; } load_ssp_default_config(pl022); + pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); clk_disable(pl022->clk); clk_put(pl022->clk); @@ -1772,9 +2200,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state) return status; } - clk_enable(pl022->clk); + amba_pclk_enable(adev); load_ssp_default_config(pl022); - clk_disable(pl022->clk); + amba_pclk_disable(adev); dev_dbg(&adev->dev, "suspended\n"); return 0; } @@ -1802,6 +2230,8 @@ static struct vendor_data vendor_arm = { .fifodepth = 8, .max_bpw = 16, .unidir = false, + .extended_cr = false, + .pl023 = false, }; @@ -1809,6 +2239,16 @@ static struct vendor_data vendor_st = { .fifodepth = 32, .max_bpw = 32, .unidir = false, + .extended_cr = true, + .pl023 = false, +}; + +static struct vendor_data vendor_st_pl023 = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = true, }; static struct amba_id pl022_ids[] = { @@ -1830,6 +2270,18 @@ static struct amba_id pl022_ids[] = { .mask = 0xffffffff, .data = &vendor_st, }, + { + /* + * ST-Ericsson derivative "PL023" (this is not + * an official ARM number), this is a PL022 SSP block + * stripped to SPI mode only, it has 32bit wide + * and 32 locations deep TX/RX FIFO but no extended + * CR0/CR1 register + */ + .id = 0x00080023, + .mask = 0xffffffff, + .data = &vendor_st_pl023, + }, { 0, 0 }, }; @@ -1839,7 +2291,7 @@ static struct amba_driver pl022_driver = { }, .id_table = pl022_ids, .probe = pl022_probe, - .remove = __exit_p(pl022_remove), + .remove = __devexit_p(pl022_remove), .suspend = pl022_suspend, .resume = pl022_resume, }; @@ -1850,7 +2302,7 @@ static int __init 
pl022_init(void) return amba_driver_register(&pl022_driver); } -module_init(pl022_init); +subsys_initcall(pl022_init); static void __exit pl022_exit(void) { diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index d21c24eaf0a..154529aacc0 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -18,6 +18,7 @@ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> +#include <linux/slab.h> #include <asm/io.h> #include <mach/board.h> @@ -653,6 +654,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) struct spi_transfer *xfer; unsigned long flags; struct device *controller = spi->master->dev.parent; + u8 bits; + struct atmel_spi_device *asd; as = spi_master_get_devdata(spi->master); @@ -671,8 +674,18 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) return -EINVAL; } + if (xfer->bits_per_word) { + asd = spi->controller_state; + bits = (asd->csr >> 4) & 0xf; + if (bits != xfer->bits_per_word - 8) { + dev_dbg(&spi->dev, "you can't yet change " + "bits_per_word in transfers\n"); + return -ENOPROTOOPT; + } + } + /* FIXME implement these protocol options!! */ - if (xfer->bits_per_word || xfer->speed_hz) { + if (xfer->speed_hz) { dev_dbg(&spi->dev, "no protocol options yet\n"); return -ENOPROTOOPT; } diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index cfd5ff9508f..3c9ade69643 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/platform_device.h> @@ -412,11 +413,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) } /* put buffers on the ring */ - res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len); + res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx), + t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "rx dma put dest error\n"); - res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len); + res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx), + t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "tx dma put source error\n"); diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c new file mode 100644 index 00000000000..052b3c7fa6a --- /dev/null +++ b/drivers/spi/coldfire_qspi.c @@ -0,0 +1,641 @@ +/* + * Freescale/Motorola Coldfire Queued SPI driver + * + * Copyright 2010 Steven King <sfking@fdwdc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA + * +*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/spi/spi.h> + +#include <asm/coldfire.h> +#include <asm/mcfqspi.h> + +#define DRIVER_NAME "mcfqspi" + +#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) + +#define MCFQSPI_QMR 0x00 +#define MCFQSPI_QMR_MSTR 0x8000 +#define MCFQSPI_QMR_CPOL 0x0200 +#define MCFQSPI_QMR_CPHA 0x0100 +#define MCFQSPI_QDLYR 0x04 +#define MCFQSPI_QDLYR_SPE 0x8000 +#define MCFQSPI_QWR 0x08 +#define MCFQSPI_QWR_HALT 0x8000 +#define MCFQSPI_QWR_WREN 0x4000 +#define MCFQSPI_QWR_CSIV 0x1000 +#define MCFQSPI_QIR 0x0C +#define MCFQSPI_QIR_WCEFB 0x8000 +#define MCFQSPI_QIR_ABRTB 0x4000 +#define MCFQSPI_QIR_ABRTL 0x1000 +#define MCFQSPI_QIR_WCEFE 0x0800 +#define MCFQSPI_QIR_ABRTE 0x0400 +#define MCFQSPI_QIR_SPIFE 0x0100 +#define MCFQSPI_QIR_WCEF 0x0008 +#define MCFQSPI_QIR_ABRT 0x0004 +#define MCFQSPI_QIR_SPIF 0x0001 +#define MCFQSPI_QAR 0x010 +#define MCFQSPI_QAR_TXBUF 0x00 +#define MCFQSPI_QAR_RXBUF 0x10 +#define MCFQSPI_QAR_CMDBUF 0x20 +#define MCFQSPI_QDR 0x014 +#define MCFQSPI_QCR 0x014 +#define MCFQSPI_QCR_CONT 0x8000 +#define MCFQSPI_QCR_BITSE 0x4000 +#define MCFQSPI_QCR_DT 0x2000 + +struct mcfqspi { + void __iomem *iobase; + int irq; + struct clk *clk; + struct mcfqspi_cs_control *cs_control; + + wait_queue_head_t waitq; + + struct work_struct work; + struct workqueue_struct *workq; + spinlock_t lock; + struct list_head msgq; +}; + +static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QMR); +} + +static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QWR); +} + +static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QIR); +} + +static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QAR); +} + +static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDR); +} + +static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDR); +} + +static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); +} + +static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); +} + +static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) +{ + return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? 
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; +} + +static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) +{ + if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) + mcfqspi->cs_control->teardown(mcfqspi->cs_control); +} + +static u8 mcfqspi_qmr_baud(u32 speed_hz) +{ + return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); +} + +static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) +{ + return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; +} + +static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) +{ + struct mcfqspi *mcfqspi = dev_id; + + /* clear interrupt */ + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); + wake_up(&mcfqspi->waitq); + + return IRQ_HANDLED; +} + +static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, + const u8 *txbuf, u8 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 
0x808 : 0) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, + const u16 *txbuf, u16 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_work(struct work_struct *work) +{ + struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); + unsigned long flags; + + spin_lock_irqsave(&mcfqspi->lock, flags); + while (!list_empty(&mcfqspi->msgq)) { + struct spi_message *msg; + struct spi_device *spi; + struct spi_transfer *xfer; + int status = 0; + + msg = container_of(mcfqspi->msgq.next, struct spi_message, + queue); + + list_del_init(&mcfqspi->msgq); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + spi = msg->spi; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + bool cs_high = spi->mode & SPI_CS_HIGH; + u16 qmr = MCFQSPI_QMR_MSTR; + + if (xfer->bits_per_word) + qmr |= xfer->bits_per_word << 10; + else + qmr |= spi->bits_per_word << 10; + if (spi->mode & SPI_CPHA) + qmr |= MCFQSPI_QMR_CPHA; + if (spi->mode & SPI_CPOL) + qmr |= MCFQSPI_QMR_CPOL; + if (xfer->speed_hz) + qmr |= mcfqspi_qmr_baud(xfer->speed_hz); + else + qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); + mcfqspi_wr_qmr(mcfqspi, qmr); + + mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); + + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); + 
if ((xfer->bits_per_word ? xfer->bits_per_word : + spi->bits_per_word) == 8) + mcfqspi_transfer_msg8(mcfqspi, xfer->len, + xfer->tx_buf, + xfer->rx_buf); + else + mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, + xfer->tx_buf, + xfer->rx_buf); + mcfqspi_wr_qir(mcfqspi, 0); + + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + if (xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } else { + if (list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } + msg->actual_length += xfer->len; + } + msg->status = status; + msg->complete(msg->context); + + spin_lock_irqsave(&mcfqspi->lock, flags); + } + spin_unlock_irqrestore(&mcfqspi->lock, flags); +} + +static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct mcfqspi *mcfqspi; + struct spi_transfer *xfer; + unsigned long flags; + + mcfqspi = spi_master_get_devdata(spi->master); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->bits_per_word && ((xfer->bits_per_word < 8) + || (xfer->bits_per_word > 16))) { + dev_dbg(&spi->dev, + "%d bits per word is not supported\n", + xfer->bits_per_word); + goto fail; + } + if (xfer->speed_hz) { + u32 real_speed = MCFQSPI_BUSCLK / + mcfqspi_qmr_baud(xfer->speed_hz); + if (real_speed != xfer->speed_hz) + dev_dbg(&spi->dev, + "using speed %d instead of %d\n", + real_speed, xfer->speed_hz); + } + } + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + spin_lock_irqsave(&mcfqspi->lock, flags); + list_add_tail(&msg->queue, &mcfqspi->msgq); + queue_work(mcfqspi->workq, &mcfqspi->work); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + return 0; +fail: + msg->status = -EINVAL; + return -EINVAL; +} + +static int mcfqspi_setup(struct spi_device *spi) +{ + if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { + dev_dbg(&spi->dev, "%d bits per word is not supported\n", + spi->bits_per_word); + return -EINVAL; + } + if (spi->chip_select >= spi->master->num_chipselect) { + dev_dbg(&spi->dev, "%d chip select is out of range\n", + spi->chip_select); + return -EINVAL; + } + + mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), + spi->chip_select, spi->mode & SPI_CS_HIGH); + + dev_dbg(&spi->dev, + "bits per word %d, chip select %d, speed %d KHz\n", + spi->bits_per_word, spi->chip_select, + (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) + / 1000); + + return 0; +} + +static int __devinit mcfqspi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mcfqspi *mcfqspi; + struct resource *res; + struct mcfqspi_platform_data *pdata; + int status; + + master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); + if (master == NULL) { + dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); + return -ENOMEM; + } + + mcfqspi = spi_master_get_devdata(master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(&pdev->dev, "platform_get_resource failed\n"); + status = -ENXIO; + goto fail0; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_dbg(&pdev->dev, "request_mem_region failed\n"); + status = -EBUSY; + goto fail0; + } + + mcfqspi->iobase = ioremap(res->start, resource_size(res)); + if (!mcfqspi->iobase) { + dev_dbg(&pdev->dev, "ioremap failed\n"); + status = -ENOMEM; + goto fail1; + } + + mcfqspi->irq = platform_get_irq(pdev, 0); + if (mcfqspi->irq < 0) { + dev_dbg(&pdev->dev, "platform_get_irq failed\n"); + 
status = -ENXIO; + goto fail2; + } + + status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, + pdev->name, mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "request_irq failed\n"); + goto fail2; + } + + mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); + if (IS_ERR(mcfqspi->clk)) { + dev_dbg(&pdev->dev, "clk_get failed\n"); + status = PTR_ERR(mcfqspi->clk); + goto fail3; + } + clk_enable(mcfqspi->clk); + + mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); + if (!mcfqspi->workq) { + dev_dbg(&pdev->dev, "create_workqueue failed\n"); + status = -ENOMEM; + goto fail4; + } + INIT_WORK(&mcfqspi->work, mcfqspi_work); + spin_lock_init(&mcfqspi->lock); + INIT_LIST_HEAD(&mcfqspi->msgq); + init_waitqueue_head(&mcfqspi->waitq); + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_dbg(&pdev->dev, "platform data is missing\n"); + goto fail5; + } + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->num_chipselect; + + mcfqspi->cs_control = pdata->cs_control; + status = mcfqspi_cs_setup(mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "error initializing cs_control\n"); + goto fail5; + } + + master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; + master->setup = mcfqspi_setup; + master->transfer = mcfqspi_transfer; + + platform_set_drvdata(pdev, master); + + status = spi_register_master(master); + if (status) { + dev_dbg(&pdev->dev, "spi_register_master failed\n"); + goto fail6; + } + dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); + + return 0; + +fail6: + mcfqspi_cs_teardown(mcfqspi); +fail5: + destroy_workqueue(mcfqspi->workq); +fail4: + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); +fail3: + free_irq(mcfqspi->irq, mcfqspi); +fail2: + iounmap(mcfqspi->iobase); +fail1: + release_mem_region(res->start, resource_size(res)); +fail0: + spi_master_put(master); + + dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); + + return status; +} + +static int __devexit mcfqspi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* disable the hardware (set the baud rate to 0) */ + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); + + platform_set_drvdata(pdev, NULL); + mcfqspi_cs_teardown(mcfqspi); + destroy_workqueue(mcfqspi->workq); + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); + free_irq(mcfqspi->irq, mcfqspi); + iounmap(mcfqspi->iobase); + release_mem_region(res->start, resource_size(res)); + spi_unregister_master(master); + spi_master_put(master); + + return 0; +} + +#ifdef CONFIG_PM + +static int mcfqspi_suspend(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_disable(mcfqspi->clk); + + return 0; +} + +static int mcfqspi_resume(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_enable(mcfqspi->clk); + + return 0; +} + +static struct dev_pm_ops mcfqspi_dev_pm_ops = { + .suspend = mcfqspi_suspend, + .resume = mcfqspi_resume, +}; + +#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) +#else +#define MCFQSPI_DEV_PM_OPS NULL +#endif + +static struct platform_driver mcfqspi_driver = { + .driver.name = DRIVER_NAME, + .driver.owner = THIS_MODULE, + .driver.pm = MCFQSPI_DEV_PM_OPS, + .remove = __devexit_p(mcfqspi_remove), +}; + +static int __init mcfqspi_init(void) +{ + return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); +} 
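The mcfqspi_qmr_baud() helper above derives the QMR[BAUD] field by ceiling-dividing the QSPI input clock by the requested rate and clamping the result to the 2..255 range the hardware accepts, which is why mcfqspi_transfer() can end up printing "using speed %d instead of %d". A small stand-alone sketch of that rounding, assuming a 33 MHz QSPI input clock (the real value is MCF_BUSCLK / 2 and is board specific); illustrative only, not part of the patch:

#include <stdio.h>

#define QSPI_BUSCLK 33000000u	/* assumed input clock, board dependent */

static unsigned int qmr_baud(unsigned int speed_hz)
{
	/* ceiling divide, then clamp to what the 8-bit BAUD field allows */
	unsigned int baud = (QSPI_BUSCLK + speed_hz - 1) / speed_hz;

	if (baud < 2)
		baud = 2;
	else if (baud > 255)
		baud = 255;
	return baud;
}

int main(void)
{
	unsigned int want = 10000000;	/* 10 MHz requested */
	unsigned int baud = qmr_baud(want);

	/* 33 MHz / 4 = 8.25 MHz: the "using speed X instead of Y" case */
	printf("requested %u Hz, baud %u, effective %u Hz\n",
	       want, baud, QSPI_BUSCLK / baud);
	return 0;
}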
+module_init(mcfqspi_init); + +static void __exit mcfqspi_exit(void) +{ + platform_driver_unregister(&mcfqspi_driver); +} +module_exit(mcfqspi_exit); + +MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); +MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c new file mode 100644 index 00000000000..b85090caf7c --- /dev/null +++ b/drivers/spi/davinci_spi.c @@ -0,0 +1,1262 @@ +/* + * Copyright (C) 2009 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/slab.h> + +#include <mach/spi.h> +#include <mach/edma.h> + +#define SPI_NO_RESOURCE ((resource_size_t)-1) + +#define SPI_MAX_CHIPSELECT 2 + +#define CS_DEFAULT 0xFF + +#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) +#define DAVINCI_DMA_DATA_TYPE_S8 0x01 +#define DAVINCI_DMA_DATA_TYPE_S16 0x02 +#define DAVINCI_DMA_DATA_TYPE_S32 0x04 + +#define SPIFMT_PHASE_MASK BIT(16) +#define SPIFMT_POLARITY_MASK BIT(17) +#define SPIFMT_DISTIMER_MASK BIT(18) +#define SPIFMT_SHIFTDIR_MASK BIT(20) +#define SPIFMT_WAITENA_MASK BIT(21) +#define SPIFMT_PARITYENA_MASK BIT(22) +#define SPIFMT_ODD_PARITY_MASK BIT(23) +#define SPIFMT_WDELAY_MASK 0x3f000000u +#define SPIFMT_WDELAY_SHIFT 24 +#define SPIFMT_CHARLEN_MASK 0x0000001Fu + +/* SPIGCR1 */ +#define SPIGCR1_SPIENA_MASK 0x01000000u + +/* SPIPC0 */ +#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ +#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ +#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ +#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ +#define SPIPC0_EN1FUN_MASK BIT(1) +#define SPIPC0_EN0FUN_MASK BIT(0) + +#define SPIINT_MASKALL 0x0101035F +#define SPI_INTLVL_1 0x000001FFu +#define SPI_INTLVL_0 0x00000000u + +/* SPIDAT1 */ +#define SPIDAT1_CSHOLD_SHIFT 28 +#define SPIDAT1_CSNR_SHIFT 16 +#define SPIGCR1_CLKMOD_MASK BIT(1) +#define SPIGCR1_MASTER_MASK BIT(0) +#define SPIGCR1_LOOPBACK_MASK BIT(16) + +/* SPIBUF */ +#define SPIBUF_TXFULL_MASK BIT(29) +#define SPIBUF_RXEMPTY_MASK BIT(31) + +/* Error Masks */ +#define SPIFLG_DLEN_ERR_MASK BIT(0) +#define SPIFLG_TIMEOUT_MASK BIT(1) +#define SPIFLG_PARERR_MASK BIT(2) +#define SPIFLG_DESYNC_MASK BIT(3) +#define SPIFLG_BITERR_MASK BIT(4) +#define SPIFLG_OVRRUN_MASK BIT(6) +#define SPIFLG_RX_INTR_MASK BIT(8) +#define SPIFLG_TX_INTR_MASK BIT(9) +#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) +#define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ + | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ + | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ + | SPIFLG_OVRRUN_MASK | 
SPIFLG_RX_INTR_MASK \ + | SPIFLG_TX_INTR_MASK \ + | SPIFLG_BUF_INIT_ACTIVE_MASK) + +#define SPIINT_DLEN_ERR_INTR BIT(0) +#define SPIINT_TIMEOUT_INTR BIT(1) +#define SPIINT_PARERR_INTR BIT(2) +#define SPIINT_DESYNC_INTR BIT(3) +#define SPIINT_BITERR_INTR BIT(4) +#define SPIINT_OVRRUN_INTR BIT(6) +#define SPIINT_RX_INTR BIT(8) +#define SPIINT_TX_INTR BIT(9) +#define SPIINT_DMA_REQ_EN BIT(16) +#define SPIINT_ENABLE_HIGHZ BIT(24) + +#define SPI_T2CDELAY_SHIFT 16 +#define SPI_C2TDELAY_SHIFT 24 + +/* SPI Controller registers */ +#define SPIGCR0 0x00 +#define SPIGCR1 0x04 +#define SPIINT 0x08 +#define SPILVL 0x0c +#define SPIFLG 0x10 +#define SPIPC0 0x14 +#define SPIPC1 0x18 +#define SPIPC2 0x1c +#define SPIPC3 0x20 +#define SPIPC4 0x24 +#define SPIPC5 0x28 +#define SPIPC6 0x2c +#define SPIPC7 0x30 +#define SPIPC8 0x34 +#define SPIDAT0 0x38 +#define SPIDAT1 0x3c +#define SPIBUF 0x40 +#define SPIEMU 0x44 +#define SPIDELAY 0x48 +#define SPIDEF 0x4c +#define SPIFMT0 0x50 +#define SPIFMT1 0x54 +#define SPIFMT2 0x58 +#define SPIFMT3 0x5c +#define TGINTVEC0 0x60 +#define TGINTVEC1 0x64 + +struct davinci_spi_slave { + u32 cmd_to_write; + u32 clk_ctrl_to_write; + u32 bytes_per_word; + u8 active_cs; +}; + +/* We have 2 DMA channels per CS, one for RX and one for TX */ +struct davinci_spi_dma { + int dma_tx_channel; + int dma_rx_channel; + int dma_tx_sync_dev; + int dma_rx_sync_dev; + enum dma_event_q eventq; + + struct completion dma_tx_completion; + struct completion dma_rx_completion; +}; + +/* SPI Controller driver's private data. */ +struct davinci_spi { + struct spi_bitbang bitbang; + struct clk *clk; + + u8 version; + resource_size_t pbase; + void __iomem *base; + size_t region_size; + u32 irq; + struct completion done; + + const void *tx; + void *rx; + u8 *tmp_buf; + int count; + struct davinci_spi_dma *dma_channels; + struct davinci_spi_platform_data *pdata; + + void (*get_rx)(u32 rx_data, struct davinci_spi *); + u32 (*get_tx)(struct davinci_spi *); + + struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; +}; + +static unsigned use_dma; + +static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) +{ + u8 *rx = davinci_spi->rx; + + *rx++ = (u8)data; + davinci_spi->rx = rx; +} + +static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) +{ + u16 *rx = davinci_spi->rx; + + *rx++ = (u16)data; + davinci_spi->rx = rx; +} + +static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) +{ + u32 data; + const u8 *tx = davinci_spi->tx; + + data = *tx++; + davinci_spi->tx = tx; + return data; +} + +static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) +{ + u32 data; + const u16 *tx = davinci_spi->tx; + + data = *tx++; + davinci_spi->tx = tx; + return data; +} + +static inline void set_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v |= bits; + iowrite32(v, addr); +} + +static inline void clear_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v &= ~bits; + iowrite32(v, addr); +} + +static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) +{ + set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); +} + +static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) +{ + clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); +} + +static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) +{ + struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); + + if (enable) + set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); + else + 
clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); +} + +/* + * Interface to control the chip select signal + */ +static void davinci_spi_chipselect(struct spi_device *spi, int value) +{ + struct davinci_spi *davinci_spi; + struct davinci_spi_platform_data *pdata; + u32 data1_reg_val = 0; + + davinci_spi = spi_master_get_devdata(spi->master); + pdata = davinci_spi->pdata; + + /* + * Board specific chip select logic decides the polarity and cs + * line for the controller + */ + if (value == BITBANG_CS_INACTIVE) { + set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); + + data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; + iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); + + while ((ioread32(davinci_spi->base + SPIBUF) + & SPIBUF_RXEMPTY_MASK) == 0) + cpu_relax(); + } +} + +/** + * davinci_spi_setup_transfer - This functions will determine transfer method + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function determines data transfer method (8/16/32 bit transfer). + * It will also set the SPI Clock Control register according to + * SPI slave device freq. + */ +static int davinci_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + + struct davinci_spi *davinci_spi; + struct davinci_spi_platform_data *pdata; + u8 bits_per_word = 0; + u32 hz = 0, prescale = 0, clkspeed; + + davinci_spi = spi_master_get_devdata(spi->master); + pdata = davinci_spi->pdata; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* if bits_per_word is not set then set it default */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* + * Assign function pointer to appropriate transfer method + * 8bit, 16bit or 32bit transfer + */ + if (bits_per_word <= 8 && bits_per_word >= 2) { + davinci_spi->get_rx = davinci_spi_rx_buf_u8; + davinci_spi->get_tx = davinci_spi_tx_buf_u8; + davinci_spi->slave[spi->chip_select].bytes_per_word = 1; + } else if (bits_per_word <= 16 && bits_per_word >= 2) { + davinci_spi->get_rx = davinci_spi_rx_buf_u16; + davinci_spi->get_tx = davinci_spi_tx_buf_u16; + davinci_spi->slave[spi->chip_select].bytes_per_word = 2; + } else + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, + spi->chip_select); + set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, + spi->chip_select); + + clkspeed = clk_get_rate(davinci_spi->clk); + if (hz > clkspeed / 2) + prescale = 1 << 8; + if (hz < clkspeed / 256) + prescale = 255 << 8; + if (!prescale) + prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00; + + clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); + set_fmt_bits(davinci_spi->base, prescale, spi->chip_select); + + return 0; +} + +static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) +{ + struct spi_device *spi = (struct spi_device *)data; + struct davinci_spi *davinci_spi; + struct davinci_spi_dma *davinci_spi_dma; + struct davinci_spi_platform_data *pdata; + + davinci_spi = spi_master_get_devdata(spi->master); + davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); + pdata = davinci_spi->pdata; + + if (ch_status == DMA_COMPLETE) + edma_stop(davinci_spi_dma->dma_rx_channel); + else + edma_clean_channel(davinci_spi_dma->dma_rx_channel); + + complete(&davinci_spi_dma->dma_rx_completion); + /* We must disable the DMA RX request */ + davinci_spi_set_dma_req(spi, 0); +} + +static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void 
*data) +{ + struct spi_device *spi = (struct spi_device *)data; + struct davinci_spi *davinci_spi; + struct davinci_spi_dma *davinci_spi_dma; + struct davinci_spi_platform_data *pdata; + + davinci_spi = spi_master_get_devdata(spi->master); + davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); + pdata = davinci_spi->pdata; + + if (ch_status == DMA_COMPLETE) + edma_stop(davinci_spi_dma->dma_tx_channel); + else + edma_clean_channel(davinci_spi_dma->dma_tx_channel); + + complete(&davinci_spi_dma->dma_tx_completion); + /* We must disable the DMA TX request */ + davinci_spi_set_dma_req(spi, 0); +} + +static int davinci_spi_request_dma(struct spi_device *spi) +{ + struct davinci_spi *davinci_spi; + struct davinci_spi_dma *davinci_spi_dma; + struct davinci_spi_platform_data *pdata; + struct device *sdev; + int r; + + davinci_spi = spi_master_get_devdata(spi->master); + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; + pdata = davinci_spi->pdata; + sdev = davinci_spi->bitbang.master->dev.parent; + + r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, + davinci_spi_dma_rx_callback, spi, + davinci_spi_dma->eventq); + if (r < 0) { + dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); + return -EAGAIN; + } + davinci_spi_dma->dma_rx_channel = r; + r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, + davinci_spi_dma_tx_callback, spi, + davinci_spi_dma->eventq); + if (r < 0) { + edma_free_channel(davinci_spi_dma->dma_rx_channel); + davinci_spi_dma->dma_rx_channel = -1; + dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); + return -EAGAIN; + } + davinci_spi_dma->dma_tx_channel = r; + + return 0; +} + +/** + * davinci_spi_setup - This functions will set default transfer method + * @spi: spi device on which data transfer to be done + * + * This functions sets the default transfer method. + */ + +static int davinci_spi_setup(struct spi_device *spi) +{ + int retval; + struct davinci_spi *davinci_spi; + struct davinci_spi_dma *davinci_spi_dma; + struct device *sdev; + + davinci_spi = spi_master_get_devdata(spi->master); + sdev = davinci_spi->bitbang.master->dev.parent; + + /* if bits per word length is zero then set it default 8 */ + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + davinci_spi->slave[spi->chip_select].cmd_to_write = 0; + + if (use_dma && davinci_spi->dma_channels) { + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; + + if ((davinci_spi_dma->dma_rx_channel == -1) + || (davinci_spi_dma->dma_tx_channel == -1)) { + retval = davinci_spi_request_dma(spi); + if (retval < 0) + return retval; + } + } + + /* + * SPI in DaVinci and DA8xx operate between + * 600 KHz and 50 MHz + */ + if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { + dev_dbg(sdev, "Operating frequency is not in acceptable " + "range\n"); + return -EINVAL; + } + + /* + * Set up SPIFMTn register, unique to this chipselect. + * + * NOTE: we could do all of these with one write. Also, some + * of the "version 2" features are found in chips that don't + * support all of them... 
+ */ + if (spi->mode & SPI_LSB_FIRST) + set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, + spi->chip_select); + + if (spi->mode & SPI_CPOL) + set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, + spi->chip_select); + + if (!(spi->mode & SPI_CPHA)) + set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, + spi->chip_select); + + /* + * Version 1 hardware supports two basic SPI modes: + * - Standard SPI mode uses 4 pins, with chipselect + * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) + * (distinct from SPI_3WIRE, with just one data wire; + * or similar variants without MOSI or without MISO) + * + * Version 2 hardware supports an optional handshaking signal, + * so it can support two more modes: + * - 5 pin SPI variant is standard SPI plus SPI_READY + * - 4 pin with enable is (SPI_READY | SPI_NO_CS) + */ + + if (davinci_spi->version == SPI_VERSION_2) { + clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, + spi->chip_select); + set_fmt_bits(davinci_spi->base, + (davinci_spi->pdata->wdelay + << SPIFMT_WDELAY_SHIFT) + & SPIFMT_WDELAY_MASK, + spi->chip_select); + + if (davinci_spi->pdata->odd_parity) + set_fmt_bits(davinci_spi->base, + SPIFMT_ODD_PARITY_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, + SPIFMT_ODD_PARITY_MASK, + spi->chip_select); + + if (davinci_spi->pdata->parity_enable) + set_fmt_bits(davinci_spi->base, + SPIFMT_PARITYENA_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, + SPIFMT_PARITYENA_MASK, + spi->chip_select); + + if (davinci_spi->pdata->wait_enable) + set_fmt_bits(davinci_spi->base, + SPIFMT_WAITENA_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, + SPIFMT_WAITENA_MASK, + spi->chip_select); + + if (davinci_spi->pdata->timer_disable) + set_fmt_bits(davinci_spi->base, + SPIFMT_DISTIMER_MASK, + spi->chip_select); + else + clear_fmt_bits(davinci_spi->base, + SPIFMT_DISTIMER_MASK, + spi->chip_select); + } + + retval = davinci_spi_setup_transfer(spi, NULL); + + return retval; +} + +static void davinci_spi_cleanup(struct spi_device *spi) +{ + struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); + struct davinci_spi_dma *davinci_spi_dma; + + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; + + if (use_dma && davinci_spi->dma_channels) { + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; + + if ((davinci_spi_dma->dma_rx_channel != -1) + && (davinci_spi_dma->dma_tx_channel != -1)) { + edma_free_channel(davinci_spi_dma->dma_tx_channel); + edma_free_channel(davinci_spi_dma->dma_rx_channel); + } + } +} + +static int davinci_spi_bufs_prep(struct spi_device *spi, + struct davinci_spi *davinci_spi) +{ + int op_mode = 0; + + /* + * REVISIT unless devices disagree about SPI_LOOP or + * SPI_READY (SPI_NO_CS only allows one device!), this + * should not need to be done before each message... + * optimize for both flags staying cleared. 
+ */ + + op_mode = SPIPC0_DIFUN_MASK + | SPIPC0_DOFUN_MASK + | SPIPC0_CLKFUN_MASK; + if (!(spi->mode & SPI_NO_CS)) + op_mode |= 1 << spi->chip_select; + if (spi->mode & SPI_READY) + op_mode |= SPIPC0_SPIENA_MASK; + + iowrite32(op_mode, davinci_spi->base + SPIPC0); + + if (spi->mode & SPI_LOOP) + set_io_bits(davinci_spi->base + SPIGCR1, + SPIGCR1_LOOPBACK_MASK); + else + clear_io_bits(davinci_spi->base + SPIGCR1, + SPIGCR1_LOOPBACK_MASK); + + return 0; +} + +static int davinci_spi_check_error(struct davinci_spi *davinci_spi, + int int_status) +{ + struct device *sdev = davinci_spi->bitbang.master->dev.parent; + + if (int_status & SPIFLG_TIMEOUT_MASK) { + dev_dbg(sdev, "SPI Time-out Error\n"); + return -ETIMEDOUT; + } + if (int_status & SPIFLG_DESYNC_MASK) { + dev_dbg(sdev, "SPI Desynchronization Error\n"); + return -EIO; + } + if (int_status & SPIFLG_BITERR_MASK) { + dev_dbg(sdev, "SPI Bit error\n"); + return -EIO; + } + + if (davinci_spi->version == SPI_VERSION_2) { + if (int_status & SPIFLG_DLEN_ERR_MASK) { + dev_dbg(sdev, "SPI Data Length Error\n"); + return -EIO; + } + if (int_status & SPIFLG_PARERR_MASK) { + dev_dbg(sdev, "SPI Parity Error\n"); + return -EIO; + } + if (int_status & SPIFLG_OVRRUN_MASK) { + dev_dbg(sdev, "SPI Data Overrun error\n"); + return -EIO; + } + if (int_status & SPIFLG_TX_INTR_MASK) { + dev_dbg(sdev, "SPI TX intr bit set\n"); + return -EIO; + } + if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { + dev_dbg(sdev, "SPI Buffer Init Active\n"); + return -EBUSY; + } + } + + return 0; +} + +/** + * davinci_spi_bufs - functions which will handle transfer data + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function will put data to be transferred into data register + * of SPI controller and then wait until the completion will be marked + * by the IRQ Handler. 
+ */ +static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) +{ + struct davinci_spi *davinci_spi; + int int_status, count, ret; + u8 conv, tmp; + u32 tx_data, data1_reg_val; + u32 buf_val, flg_val; + struct davinci_spi_platform_data *pdata; + + davinci_spi = spi_master_get_devdata(spi->master); + pdata = davinci_spi->pdata; + + davinci_spi->tx = t->tx_buf; + davinci_spi->rx = t->rx_buf; + + /* convert len to words based on bits_per_word */ + conv = davinci_spi->slave[spi->chip_select].bytes_per_word; + davinci_spi->count = t->len / conv; + + INIT_COMPLETION(davinci_spi->done); + + ret = davinci_spi_bufs_prep(spi, davinci_spi); + if (ret) + return ret; + + /* Enable SPI */ + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + + iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | + (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), + davinci_spi->base + SPIDELAY); + + count = davinci_spi->count; + data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; + tmp = ~(0x1 << spi->chip_select); + + clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); + + data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; + + while ((ioread32(davinci_spi->base + SPIBUF) + & SPIBUF_RXEMPTY_MASK) == 0) + cpu_relax(); + + /* Determine the command to execute READ or WRITE */ + if (t->tx_buf) { + clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); + + while (1) { + tx_data = davinci_spi->get_tx(davinci_spi); + + data1_reg_val &= ~(0xFFFF); + data1_reg_val |= (0xFFFF & tx_data); + + buf_val = ioread32(davinci_spi->base + SPIBUF); + if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { + iowrite32(data1_reg_val, + davinci_spi->base + SPIDAT1); + + count--; + } + while (ioread32(davinci_spi->base + SPIBUF) + & SPIBUF_RXEMPTY_MASK) + cpu_relax(); + + /* getting the returned byte */ + if (t->rx_buf) { + buf_val = ioread32(davinci_spi->base + SPIBUF); + davinci_spi->get_rx(buf_val, davinci_spi); + } + if (count <= 0) + break; + } + } else { + if (pdata->poll_mode) { + while (1) { + /* keeps the serial clock going */ + if ((ioread32(davinci_spi->base + SPIBUF) + & SPIBUF_TXFULL_MASK) == 0) + iowrite32(data1_reg_val, + davinci_spi->base + SPIDAT1); + + while (ioread32(davinci_spi->base + SPIBUF) & + SPIBUF_RXEMPTY_MASK) + cpu_relax(); + + flg_val = ioread32(davinci_spi->base + SPIFLG); + buf_val = ioread32(davinci_spi->base + SPIBUF); + + davinci_spi->get_rx(buf_val, davinci_spi); + + count--; + if (count <= 0) + break; + } + } else { /* Receive in Interrupt mode */ + int i; + + for (i = 0; i < davinci_spi->count; i++) { + set_io_bits(davinci_spi->base + SPIINT, + SPIINT_BITERR_INTR + | SPIINT_OVRRUN_INTR + | SPIINT_RX_INTR); + + iowrite32(data1_reg_val, + davinci_spi->base + SPIDAT1); + + while (ioread32(davinci_spi->base + SPIINT) & + SPIINT_RX_INTR) + cpu_relax(); + } + iowrite32((data1_reg_val & 0x0ffcffff), + davinci_spi->base + SPIDAT1); + } + } + + /* + * Check for bit error, desync error,parity error,timeout error and + * receive overflow errors + */ + int_status = ioread32(davinci_spi->base + SPIFLG); + + ret = davinci_spi_check_error(davinci_spi, int_status); + if (ret != 0) + return ret; + + /* SPI Framework maintains the count only in bytes so convert back */ + davinci_spi->count *= conv; + + return t->len; +} + +#define DAVINCI_DMA_DATA_TYPE_S8 0x01 +#define DAVINCI_DMA_DATA_TYPE_S16 0x02 +#define DAVINCI_DMA_DATA_TYPE_S32 0x04 + +static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) +{ + struct davinci_spi *davinci_spi; + int int_status = 0; + int count, temp_count; + u8 
conv = 1; + u8 tmp; + u32 data1_reg_val; + struct davinci_spi_dma *davinci_spi_dma; + int word_len, data_type, ret; + unsigned long tx_reg, rx_reg; + struct davinci_spi_platform_data *pdata; + struct device *sdev; + + davinci_spi = spi_master_get_devdata(spi->master); + pdata = davinci_spi->pdata; + sdev = davinci_spi->bitbang.master->dev.parent; + + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; + + tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; + rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; + + davinci_spi->tx = t->tx_buf; + davinci_spi->rx = t->rx_buf; + + /* convert len to words based on bits_per_word */ + conv = davinci_spi->slave[spi->chip_select].bytes_per_word; + davinci_spi->count = t->len / conv; + + INIT_COMPLETION(davinci_spi->done); + + init_completion(&davinci_spi_dma->dma_rx_completion); + init_completion(&davinci_spi_dma->dma_tx_completion); + + word_len = conv * 8; + + if (word_len <= 8) + data_type = DAVINCI_DMA_DATA_TYPE_S8; + else if (word_len <= 16) + data_type = DAVINCI_DMA_DATA_TYPE_S16; + else if (word_len <= 32) + data_type = DAVINCI_DMA_DATA_TYPE_S32; + else + return -EINVAL; + + ret = davinci_spi_bufs_prep(spi, davinci_spi); + if (ret) + return ret; + + /* Put delay val if required */ + iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | + (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), + davinci_spi->base + SPIDELAY); + + count = davinci_spi->count; /* the number of elements */ + data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; + + /* CS default = 0xFF */ + tmp = ~(0x1 << spi->chip_select); + + clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); + + data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; + + /* disable all interrupts for dma transfers */ + clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); + /* Disable SPI to write configuration bits in SPIDAT */ + clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); + /* Enable SPI */ + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + + while ((ioread32(davinci_spi->base + SPIBUF) + & SPIBUF_RXEMPTY_MASK) == 0) + cpu_relax(); + + + if (t->tx_buf) { + t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, + DMA_TO_DEVICE); + if (dma_mapping_error(&spi->dev, t->tx_dma)) { + dev_dbg(sdev, "Unable to DMA map a %d bytes" + " TX buffer\n", count); + return -ENOMEM; + } + temp_count = count; + } else { + /* We need TX clocking for RX transaction */ + t->tx_dma = dma_map_single(&spi->dev, + (void *)davinci_spi->tmp_buf, count + 1, + DMA_TO_DEVICE); + if (dma_mapping_error(&spi->dev, t->tx_dma)) { + dev_dbg(sdev, "Unable to DMA map a %d bytes" + " TX tmp buffer\n", count); + return -ENOMEM; + } + temp_count = count + 1; + } + + edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, + data_type, temp_count, 1, 0, ASYNC); + edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); + edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); + edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); + edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); + + if (t->rx_buf) { + /* initiate transaction */ + iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); + + t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, + DMA_FROM_DEVICE); + if (dma_mapping_error(&spi->dev, t->rx_dma)) { + dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", + count); + if (t->tx_buf != NULL) + dma_unmap_single(NULL, t->tx_dma, + count, DMA_TO_DEVICE); + return 
-ENOMEM; + } + edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, + data_type, count, 1, 0, ASYNC); + edma_set_src(davinci_spi_dma->dma_rx_channel, + rx_reg, INCR, W8BIT); + edma_set_dest(davinci_spi_dma->dma_rx_channel, + t->rx_dma, INCR, W8BIT); + edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); + edma_set_dest_index(davinci_spi_dma->dma_rx_channel, + data_type, 0); + } + + if ((t->tx_buf) || (t->rx_buf)) + edma_start(davinci_spi_dma->dma_tx_channel); + + if (t->rx_buf) + edma_start(davinci_spi_dma->dma_rx_channel); + + if ((t->rx_buf) || (t->tx_buf)) + davinci_spi_set_dma_req(spi, 1); + + if (t->tx_buf) + wait_for_completion_interruptible( + &davinci_spi_dma->dma_tx_completion); + + if (t->rx_buf) + wait_for_completion_interruptible( + &davinci_spi_dma->dma_rx_completion); + + dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); + + if (t->rx_buf) + dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); + + /* + * Check for bit error, desync error,parity error,timeout error and + * receive overflow errors + */ + int_status = ioread32(davinci_spi->base + SPIFLG); + + ret = davinci_spi_check_error(davinci_spi, int_status); + if (ret != 0) + return ret; + + /* SPI Framework maintains the count only in bytes so convert back */ + davinci_spi->count *= conv; + + return t->len; +} + +/** + * davinci_spi_irq - IRQ handler for DaVinci SPI + * @irq: IRQ number for this SPI Master + * @context_data: structure for SPI Master controller davinci_spi + */ +static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) +{ + struct davinci_spi *davinci_spi = context_data; + u32 int_status, rx_data = 0; + irqreturn_t ret = IRQ_NONE; + + int_status = ioread32(davinci_spi->base + SPIFLG); + + while ((int_status & SPIFLG_RX_INTR_MASK)) { + if (likely(int_status & SPIFLG_RX_INTR_MASK)) { + ret = IRQ_HANDLED; + + rx_data = ioread32(davinci_spi->base + SPIBUF); + davinci_spi->get_rx(rx_data, davinci_spi); + + /* Disable Receive Interrupt */ + iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), + davinci_spi->base + SPIINT); + } else + (void)davinci_spi_check_error(davinci_spi, int_status); + + int_status = ioread32(davinci_spi->base + SPIFLG); + } + + return ret; +} + +/** + * davinci_spi_probe - probe function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + */ +static int davinci_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct davinci_spi *davinci_spi; + struct davinci_spi_platform_data *pdata; + struct resource *r, *mem; + resource_size_t dma_rx_chan = SPI_NO_RESOURCE; + resource_size_t dma_tx_chan = SPI_NO_RESOURCE; + resource_size_t dma_eventq = SPI_NO_RESOURCE; + int i = 0, ret = 0; + + pdata = pdev->dev.platform_data; + if (pdata == NULL) { + ret = -ENODEV; + goto err; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); + if (master == NULL) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(&pdev->dev, master); + + davinci_spi = spi_master_get_devdata(master); + if (davinci_spi == NULL) { + ret = -ENOENT; + goto free_master; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENOENT; + goto free_master; + } + + davinci_spi->pbase = r->start; + davinci_spi->region_size = resource_size(r); + davinci_spi->pdata = pdata; + + mem = request_mem_region(r->start, davinci_spi->region_size, + pdev->name); + if (mem == NULL) { + ret = -EBUSY; + goto free_master; + } + + davinci_spi->base = (struct davinci_spi_reg __iomem *) + 
ioremap(r->start, davinci_spi->region_size); + if (davinci_spi->base == NULL) { + ret = -ENOMEM; + goto release_region; + } + + davinci_spi->irq = platform_get_irq(pdev, 0); + if (davinci_spi->irq <= 0) { + ret = -EINVAL; + goto unmap_io; + } + + ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, + dev_name(&pdev->dev), davinci_spi); + if (ret) + goto unmap_io; + + /* Allocate tmp_buf for tx_buf */ + davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); + if (davinci_spi->tmp_buf == NULL) { + ret = -ENOMEM; + goto irq_free; + } + + davinci_spi->bitbang.master = spi_master_get(master); + if (davinci_spi->bitbang.master == NULL) { + ret = -ENODEV; + goto free_tmp_buf; + } + + davinci_spi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(davinci_spi->clk)) { + ret = -ENODEV; + goto put_master; + } + clk_enable(davinci_spi->clk); + + + master->bus_num = pdev->id; + master->num_chipselect = pdata->num_chipselect; + master->setup = davinci_spi_setup; + master->cleanup = davinci_spi_cleanup; + + davinci_spi->bitbang.chipselect = davinci_spi_chipselect; + davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; + + davinci_spi->version = pdata->version; + use_dma = pdata->use_dma; + + davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; + if (davinci_spi->version == SPI_VERSION_2) + davinci_spi->bitbang.flags |= SPI_READY; + + if (use_dma) { + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (r) + dma_rx_chan = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (r) + dma_tx_chan = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 2); + if (r) + dma_eventq = r->start; + } + + if (!use_dma || + dma_rx_chan == SPI_NO_RESOURCE || + dma_tx_chan == SPI_NO_RESOURCE || + dma_eventq == SPI_NO_RESOURCE) { + davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; + use_dma = 0; + } else { + davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; + davinci_spi->dma_channels = kzalloc(master->num_chipselect + * sizeof(struct davinci_spi_dma), GFP_KERNEL); + if (davinci_spi->dma_channels == NULL) { + ret = -ENOMEM; + goto free_clk; + } + + for (i = 0; i < master->num_chipselect; i++) { + davinci_spi->dma_channels[i].dma_rx_channel = -1; + davinci_spi->dma_channels[i].dma_rx_sync_dev = + dma_rx_chan; + davinci_spi->dma_channels[i].dma_tx_channel = -1; + davinci_spi->dma_channels[i].dma_tx_sync_dev = + dma_tx_chan; + davinci_spi->dma_channels[i].eventq = dma_eventq; + } + dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" + "Using RX channel = %d , TX channel = %d and " + "event queue = %d", dma_rx_chan, dma_tx_chan, + dma_eventq); + } + + davinci_spi->get_rx = davinci_spi_rx_buf_u8; + davinci_spi->get_tx = davinci_spi_tx_buf_u8; + + init_completion(&davinci_spi->done); + + /* Reset In/OUT SPI module */ + iowrite32(0, davinci_spi->base + SPIGCR0); + udelay(100); + iowrite32(1, davinci_spi->base + SPIGCR0); + + /* Clock internal */ + if (davinci_spi->pdata->clk_internal) + set_io_bits(davinci_spi->base + SPIGCR1, + SPIGCR1_CLKMOD_MASK); + else + clear_io_bits(davinci_spi->base + SPIGCR1, + SPIGCR1_CLKMOD_MASK); + + /* master mode default */ + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); + + if (davinci_spi->pdata->intr_level) + iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); + else + iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); + + ret = spi_bitbang_start(&davinci_spi->bitbang); + if (ret) + goto free_clk; + + dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); + + if (!pdata->poll_mode) + 
dev_info(&pdev->dev, "Operating in interrupt mode" + " using IRQ %d\n", davinci_spi->irq); + + return ret; + +free_clk: + clk_disable(davinci_spi->clk); + clk_put(davinci_spi->clk); +put_master: + spi_master_put(master); +free_tmp_buf: + kfree(davinci_spi->tmp_buf); +irq_free: + free_irq(davinci_spi->irq, davinci_spi); +unmap_io: + iounmap(davinci_spi->base); +release_region: + release_mem_region(davinci_spi->pbase, davinci_spi->region_size); +free_master: + kfree(master); +err: + return ret; +} + +/** + * davinci_spi_remove - remove function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + * + * This function will do the reverse action of davinci_spi_probe function + * It will free the IRQ and SPI controller's memory region. + * It will also call spi_bitbang_stop to destroy the work queue which was + * created by spi_bitbang_start. + */ +static int __exit davinci_spi_remove(struct platform_device *pdev) +{ + struct davinci_spi *davinci_spi; + struct spi_master *master; + + master = dev_get_drvdata(&pdev->dev); + davinci_spi = spi_master_get_devdata(master); + + spi_bitbang_stop(&davinci_spi->bitbang); + + clk_disable(davinci_spi->clk); + clk_put(davinci_spi->clk); + spi_master_put(master); + kfree(davinci_spi->tmp_buf); + free_irq(davinci_spi->irq, davinci_spi); + iounmap(davinci_spi->base); + release_mem_region(davinci_spi->pbase, davinci_spi->region_size); + + return 0; +} + +static struct platform_driver davinci_spi_driver = { + .driver.name = "spi_davinci", + .remove = __exit_p(davinci_spi_remove), +}; + +static int __init davinci_spi_init(void) +{ + return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); +} +module_init(davinci_spi_init); + +static void __exit davinci_spi_exit(void) +{ + platform_driver_unregister(&davinci_spi_driver); +} +module_exit(davinci_spi_exit); + +MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 31620fae77b..90439314cf6 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/highmem.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/spi/dw_spi.h> #include <linux/spi/spi.h> @@ -130,6 +131,7 @@ static const struct file_operations mrst_spi_regs_ops = { .owner = THIS_MODULE, .open = spi_show_regs_open, .read = spi_show_regs, + .llseek = default_llseek, }; static int mrst_spi_debugfs_init(struct dw_spi *dws) @@ -152,6 +154,7 @@ static void mrst_spi_debugfs_remove(struct dw_spi *dws) #else static inline int mrst_spi_debugfs_init(struct dw_spi *dws) { + return 0; } static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) @@ -161,14 +164,14 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) static void wait_till_not_busy(struct dw_spi *dws) { - unsigned long end = jiffies + usecs_to_jiffies(1000); + unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); while (time_before(jiffies, end)) { if (!(dw_readw(dws, sr) & SR_BUSY)) return; } dev_err(&dws->master->dev, - "DW SPI: Stutus keeps busy for 1000us after a read/write!\n"); + "DW SPI: Status keeps busy for 1000us after a read/write!\n"); } static void flush(struct dw_spi *dws) @@ -179,10 +182,6 @@ static void flush(struct dw_spi *dws) wait_till_not_busy(dws); } -static void null_cs_control(u32 command) -{ -} - static int null_writer(struct dw_spi *dws) { u8 n_bytes = dws->n_bytes; @@ -320,7 +319,7 @@ static void giveback(struct dw_spi *dws) 
struct spi_transfer, transfer_list); - if (!last_transfer->cs_change) + if (!last_transfer->cs_change && dws->cs_control) dws->cs_control(MRST_SPI_DEASSERT); msg->state = NULL; @@ -358,6 +357,8 @@ static void transfer_complete(struct dw_spi *dws) static irqreturn_t interrupt_transfer(struct dw_spi *dws) { u16 irq_status, irq_mask = 0x3f; + u32 int_level = dws->fifo_len / 2; + u32 left; irq_status = dw_readw(dws, isr) & irq_mask; /* Error handling */ @@ -369,28 +370,34 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) return IRQ_HANDLED; } - /* INT comes from tx */ - if (dws->tx && (irq_status & SPI_INT_TXEI)) { - while (dws->tx < dws->tx_end) + if (irq_status & SPI_INT_TXEI) { + spi_mask_intr(dws, SPI_INT_TXEI); + + left = (dws->tx_end - dws->tx) / dws->n_bytes; + left = (left > int_level) ? int_level : left; + + while (left--) dws->write(dws); + dws->read(dws); - if (dws->tx == dws->tx_end) { - spi_mask_intr(dws, SPI_INT_TXEI); + /* Re-enable the IRQ if there is still data left to tx */ + if (dws->tx_end > dws->tx) + spi_umask_intr(dws, SPI_INT_TXEI); + else transfer_complete(dws); - } } - /* INT comes from rx */ - if (dws->rx && (irq_status & SPI_INT_RXFI)) { - if (dws->read(dws)) - transfer_complete(dws); - } return IRQ_HANDLED; } static irqreturn_t dw_spi_irq(int irq, void *dev_id) { struct dw_spi *dws = dev_id; + u16 irq_status, irq_mask = 0x3f; + + irq_status = dw_readw(dws, isr) & irq_mask; + if (!irq_status) + return IRQ_NONE; if (!dws->cur_msg) { spi_mask_intr(dws, SPI_INT_TXEI); @@ -404,12 +411,9 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) /* Must be called inside pump_transfers() */ static void poll_transfer(struct dw_spi *dws) { - if (dws->tx) { - while (dws->write(dws)) - dws->read(dws); - } + while (dws->write(dws)) + dws->read(dws); - dws->read(dws); transfer_complete(dws); } @@ -428,6 +432,7 @@ static void pump_transfers(unsigned long data) u8 bits = 0; u8 imask = 0; u8 cs_change = 0; + u16 txint_level = 0; u16 clk_div = 0; u32 speed = 0; u32 cr0 = 0; @@ -438,6 +443,9 @@ static void pump_transfers(unsigned long data) chip = dws->cur_chip; spi = message->spi; + if (unlikely(!chip->clk_div)) + chip->clk_div = dws->max_freq / chip->speed_hz; + if (message->state == ERROR_STATE) { message->status = -EIO; goto early_exit; @@ -492,7 +500,7 @@ static void pump_transfers(unsigned long data) /* clk_div doesn't support odd number */ clk_div = dws->max_freq / speed; - clk_div = (clk_div >> 1) << 1; + clk_div = (clk_div + 1) & 0xfffe; chip->speed_hz = speed; chip->clk_div = clk_div; @@ -532,14 +540,35 @@ static void pump_transfers(unsigned long data) } message->state = RUNNING_STATE; + /* + * Adjust transfer mode if necessary. Requires platform dependent + * chipselect mechanism. + */ + if (dws->cs_control) { + if (dws->rx && dws->tx) + chip->tmode = SPI_TMOD_TR; + else if (dws->rx) + chip->tmode = SPI_TMOD_RO; + else + chip->tmode = SPI_TMOD_TO; + + cr0 &= ~SPI_TMOD_MASK; + cr0 |= (chip->tmode << SPI_TMOD_OFFSET); + } + /* Check if current transfer is a DMA transaction */ dws->dma_mapped = map_dma_buffers(dws); + /* + * Interrupt mode + * we only need set the TXEI IRQ, as TX/RX always happen syncronizely + */ if (!dws->dma_mapped && !chip->poll_mode) { - if (dws->rx) - imask |= SPI_INT_RXFI; - if (dws->tx) - imask |= SPI_INT_TXEI; + int templen = dws->len / dws->n_bytes; + txint_level = dws->fifo_len / 2; + txint_level = (templen > txint_level) ? 
txint_level : templen; + + imask |= SPI_INT_TXEI; dws->transfer_handler = interrupt_transfer; } @@ -549,21 +578,23 @@ static void pump_transfers(unsigned long data) * 2. clk_div is changed * 3. control value changes */ - if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) { + if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { spi_enable_chip(dws, 0); if (dw_readw(dws, ctrl0) != cr0) dw_writew(dws, ctrl0, cr0); + spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); + spi_chip_sel(dws, spi->chip_select); + /* Set the interrupt mask, for poll mode just diable all int */ spi_mask_intr(dws, 0xff); - if (!chip->poll_mode) + if (imask) spi_umask_intr(dws, imask); + if (txint_level) + dw_writew(dws, txfltr, txint_level); - spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); - spi_chip_sel(dws, spi->chip_select); spi_enable_chip(dws, 1); - if (cs_change) dws->prev_chip = chip; } @@ -670,9 +701,6 @@ static int dw_spi_setup(struct spi_device *spi) chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; - - chip->cs_control = null_cs_control; - chip->enable_dma = 0; } /* @@ -712,11 +740,11 @@ static int dw_spi_setup(struct spi_device *spi) } chip->bits_per_word = spi->bits_per_word; + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "No max speed HZ parameter\n"); + return -EINVAL; + } chip->speed_hz = spi->max_speed_hz; - if (chip->speed_hz) - chip->clk_div = 25000000 / chip->speed_hz; - else - chip->clk_div = 8; /* default value */ chip->tmode = 0; /* Tx & Rx */ /* Default SPI mode is SCPOL = 0, SCPH = 0 */ @@ -735,7 +763,7 @@ static void dw_spi_cleanup(struct spi_device *spi) kfree(chip); } -static int __init init_queue(struct dw_spi *dws) +static int __devinit init_queue(struct dw_spi *dws) { INIT_LIST_HEAD(&dws->queue); spin_lock_init(&dws->lock); @@ -817,6 +845,22 @@ static void spi_hw_init(struct dw_spi *dws) spi_mask_intr(dws, 0xff); spi_enable_chip(dws, 1); flush(dws); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + for (fifo = 2; fifo <= 257; fifo++) { + dw_writew(dws, txfltr, fifo); + if (fifo != dw_readw(dws, txfltr)) + break; + } + + dws->fifo_len = (fifo == 257) ? 0 : fifo; + dw_writew(dws, txfltr, 0); + } } int __devinit dw_spi_add_host(struct dw_spi *dws) @@ -838,7 +882,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws) dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); - ret = request_irq(dws->irq, dw_spi_irq, 0, + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, "dw_spi", dws); if (ret < 0) { dev_err(&master->dev, "can not get IRQ\n"); @@ -913,6 +957,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) /* Disconnect from the SPI framework */ spi_unregister_master(dws->master); } +EXPORT_SYMBOL(dw_spi_remove_host); int dw_spi_suspend_host(struct dw_spi *dws) { diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c new file mode 100644 index 00000000000..db35bd9c1b2 --- /dev/null +++ b/drivers/spi/dw_spi_mmio.c @@ -0,0 +1,148 @@ +/* + * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core + * + * Copyright (c) 2010, Octasic semiconductor. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/dw_spi.h> +#include <linux/spi/spi.h> + +#define DRIVER_NAME "dw_spi_mmio" + +struct dw_spi_mmio { + struct dw_spi dws; + struct clk *clk; +}; + +static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio; + struct dw_spi *dws; + struct resource *mem, *ioarea; + int ret; + + dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); + if (!dwsmmio) { + ret = -ENOMEM; + goto err_end; + } + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + ret = -EINVAL; + goto err_kfree; + } + + ioarea = request_mem_region(mem->start, resource_size(mem), + pdev->name); + if (!ioarea) { + dev_err(&pdev->dev, "SPI region already claimed\n"); + ret = -EBUSY; + goto err_kfree; + } + + dws->regs = ioremap_nocache(mem->start, resource_size(mem)); + if (!dws->regs) { + dev_err(&pdev->dev, "SPI region already mapped\n"); + ret = -ENOMEM; + goto err_release_reg; + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + ret = dws->irq; /* -ENXIO */ + goto err_unmap; + } + + dwsmmio->clk = clk_get(&pdev->dev, NULL); + if (!dwsmmio->clk) { + ret = -ENODEV; + goto err_irq; + } + clk_enable(dwsmmio->clk); + + dws->parent_dev = &pdev->dev; + dws->bus_num = 0; + dws->num_cs = 4; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + ret = dw_spi_add_host(dws); + if (ret) + goto err_clk; + + platform_set_drvdata(pdev, dwsmmio); + return 0; + +err_clk: + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; +err_irq: + free_irq(dws->irq, dws); +err_unmap: + iounmap(dws->regs); +err_release_reg: + release_mem_region(mem->start, resource_size(mem)); +err_kfree: + kfree(dwsmmio); +err_end: + return ret; +} + +static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + struct resource *mem; + + platform_set_drvdata(pdev, NULL); + + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; + + free_irq(dwsmmio->dws.irq, &dwsmmio->dws); + dw_spi_remove_host(&dwsmmio->dws); + iounmap(dwsmmio->dws.regs); + kfree(dwsmmio); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + return 0; +} + +static struct platform_driver dw_spi_mmio_driver = { + .remove = __devexit_p(dw_spi_mmio_remove), + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init dw_spi_mmio_init(void) +{ + return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); +} +module_init(dw_spi_mmio_init); + +static void __exit dw_spi_mmio_exit(void) +{ + platform_driver_unregister(&dw_spi_mmio_driver); +} +module_exit(dw_spi_mmio_exit); + +MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c index 34ba6916173..1f52755dc87 100644 --- a/drivers/spi/dw_spi_pci.c +++ b/drivers/spi/dw_spi_pci.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/spi/dw_spi.h> #include <linux/spi/spi.h> @@ -73,6 +74,7 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev, 
dws->num_cs = 4; dws->max_freq = 25000000; /* for Moorestwon */ dws->irq = pdev->irq; + dws->fifo_len = 40; /* FIFO has 40 words buffer */ ret = dw_spi_add_host(dws); if (ret) @@ -98,6 +100,7 @@ static void __devexit spi_pci_remove(struct pci_dev *pdev) struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); + dw_spi_remove_host(&dwpci->dws); iounmap(dwpci->dws.regs); pci_release_region(pdev, 0); kfree(dwpci); diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c new file mode 100644 index 00000000000..0ba35df9a6d --- /dev/null +++ b/drivers/spi/ep93xx_spi.c @@ -0,0 +1,938 @@ +/* + * Driver for Cirrus Logic EP93xx SPI controller. + * + * Copyright (c) 2010 Mika Westerberg + * + * Explicit FIFO handling code was inspired by amba-pl022 driver. + * + * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. + * + * For more information about the SPI controller see documentation on Cirrus + * Logic web site: + * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/spi/spi.h> + +#include <mach/ep93xx_spi.h> + +#define SSPCR0 0x0000 +#define SSPCR0_MODE_SHIFT 6 +#define SSPCR0_SCR_SHIFT 8 + +#define SSPCR1 0x0004 +#define SSPCR1_RIE BIT(0) +#define SSPCR1_TIE BIT(1) +#define SSPCR1_RORIE BIT(2) +#define SSPCR1_LBM BIT(3) +#define SSPCR1_SSE BIT(4) +#define SSPCR1_MS BIT(5) +#define SSPCR1_SOD BIT(6) + +#define SSPDR 0x0008 + +#define SSPSR 0x000c +#define SSPSR_TFE BIT(0) +#define SSPSR_TNF BIT(1) +#define SSPSR_RNE BIT(2) +#define SSPSR_RFF BIT(3) +#define SSPSR_BSY BIT(4) +#define SSPCPSR 0x0010 + +#define SSPIIR 0x0014 +#define SSPIIR_RIS BIT(0) +#define SSPIIR_TIS BIT(1) +#define SSPIIR_RORIS BIT(2) +#define SSPICR SSPIIR + +/* timeout in milliseconds */ +#define SPI_TIMEOUT 5 +/* maximum depth of RX/TX FIFO */ +#define SPI_FIFO_SIZE 8 + +/** + * struct ep93xx_spi - EP93xx SPI controller structure + * @lock: spinlock that protects concurrent accesses to fields @running, + * @current_msg and @msg_queue + * @pdev: pointer to platform device + * @clk: clock for the controller + * @regs_base: pointer to ioremap()'d registers + * @irq: IRQ number used by the driver + * @min_rate: minimum clock rate (in Hz) supported by the controller + * @max_rate: maximum clock rate (in Hz) supported by the controller + * @running: is the queue running + * @wq: workqueue used by the driver + * @msg_work: work that is queued for the driver + * @wait: wait here until given transfer is completed + * @msg_queue: queue for the messages + * @current_msg: message that is currently processed (or %NULL if none) + * @tx: current byte in transfer to transmit + * @rx: current byte in transfer to receive + * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one + * frame decreases this level and sending one frame increases it. + * + * This structure holds EP93xx SPI controller specific information. When + * @running is %true, driver accepts transfer requests from protocol drivers. 
+ * @current_msg is used to hold pointer to the message that is currently + * processed. If @current_msg is %NULL, it means that no processing is going + * on. + * + * Most of the fields are only written once and they can be accessed without + * taking the @lock. Fields that are accessed concurrently are: @current_msg, + * @running, and @msg_queue. + */ +struct ep93xx_spi { + spinlock_t lock; + const struct platform_device *pdev; + struct clk *clk; + void __iomem *regs_base; + int irq; + unsigned long min_rate; + unsigned long max_rate; + bool running; + struct workqueue_struct *wq; + struct work_struct msg_work; + struct completion wait; + struct list_head msg_queue; + struct spi_message *current_msg; + size_t tx; + size_t rx; + size_t fifo_level; +}; + +/** + * struct ep93xx_spi_chip - SPI device hardware settings + * @spi: back pointer to the SPI device + * @rate: max rate in hz this chip supports + * @div_cpsr: cpsr (pre-scaler) divider + * @div_scr: scr divider + * @dss: bits per word (4 - 16 bits) + * @ops: private chip operations + * + * This structure is used to store hardware register specific settings for each + * SPI device. Settings are written to hardware by function + * ep93xx_spi_chip_setup(). + */ +struct ep93xx_spi_chip { + const struct spi_device *spi; + unsigned long rate; + u8 div_cpsr; + u8 div_scr; + u8 dss; + struct ep93xx_spi_chip_ops *ops; +}; + +/* converts bits per word to CR0.DSS value */ +#define bits_per_word_to_dss(bpw) ((bpw) - 1) + +static inline void +ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) +{ + __raw_writeb(value, espi->regs_base + reg); +} + +static inline u8 +ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readb(spi->regs_base + reg); +} + +static inline void +ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) +{ + __raw_writew(value, espi->regs_base + reg); +} + +static inline u16 +ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readw(spi->regs_base + reg); +} + +static int ep93xx_spi_enable(const struct ep93xx_spi *espi) +{ + u8 regval; + int err; + + err = clk_enable(espi->clk); + if (err) + return err; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + return 0; +} + +static void ep93xx_spi_disable(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + clk_disable(espi->clk); +} + +static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +/** + * ep93xx_spi_calc_divisors() - calculates SPI clock divisors + * @espi: ep93xx SPI controller struct + * @chip: divisors are calculated for this chip + * @rate: desired SPI output clock rate + * + * Function calculates cpsr (clock pre-scaler) and scr divisors based on + * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If, + * for some reason, divisors cannot be calculated nothing is stored and + * %-EINVAL is returned. 
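As an editorial aside (not part of the patch): a small stand-alone sketch of the divisor search documented above, assuming a hypothetical 14.7456 MHz SSP input clock and a requested 1 MHz rate. It picks the smallest even cpsr and the first scr that bring the output clock at or below the request, following the rate = spi_clock_rate / (cpsr * (1 + scr)) relation used in the function below.

	#include <stdio.h>

	int main(void)
	{
		const unsigned long spi_clk = 14745600;	/* hypothetical SSP input clock */
		const unsigned long rate = 1000000;	/* requested bit clock */
		int cpsr, scr;

		for (cpsr = 2; cpsr <= 254; cpsr += 2)
			for (scr = 0; scr <= 255; scr++)
				if (spi_clk / (cpsr * (scr + 1)) <= rate) {
					/* prints: cpsr=2 scr=7 -> 921600 Hz */
					printf("cpsr=%d scr=%d -> %lu Hz\n", cpsr, scr,
					       spi_clk / (cpsr * (scr + 1)));
					return 0;
				}
		return 1;	/* no divisor pair found */
	}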
+ */ +static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, + struct ep93xx_spi_chip *chip, + unsigned long rate) +{ + unsigned long spi_clk_rate = clk_get_rate(espi->clk); + int cpsr, scr; + + /* + * Make sure that max value is between values supported by the + * controller. Note that minimum value is already checked in + * ep93xx_spi_transfer(). + */ + rate = clamp(rate, espi->min_rate, espi->max_rate); + + /* + * Calculate divisors so that we can get speed according the + * following formula: + * rate = spi_clock_rate / (cpsr * (1 + scr)) + * + * cpsr must be even number and starts from 2, scr can be any number + * between 0 and 255. + */ + for (cpsr = 2; cpsr <= 254; cpsr += 2) { + for (scr = 0; scr <= 255; scr++) { + if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { + chip->div_scr = (u8)scr; + chip->div_cpsr = (u8)cpsr; + return 0; + } + } + } + + return -EINVAL; +} + +static void ep93xx_spi_cs_control(struct spi_device *spi, bool control) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(spi); + int value = (spi->mode & SPI_CS_HIGH) ? control : !control; + + if (chip->ops && chip->ops->cs_control) + chip->ops->cs_control(spi, value); +} + +/** + * ep93xx_spi_setup() - setup an SPI device + * @spi: SPI device to setup + * + * This function sets up SPI device mode, speed etc. Can be called multiple + * times for a single device. Returns %0 in case of success, negative error in + * case of failure. When this function returns success, the device is + * deselected. + */ +static int ep93xx_spi_setup(struct spi_device *spi) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct ep93xx_spi_chip *chip; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { + dev_err(&espi->pdev->dev, "invalid bits per word %d\n", + spi->bits_per_word); + return -EINVAL; + } + + chip = spi_get_ctldata(spi); + if (!chip) { + dev_dbg(&espi->pdev->dev, "initial setup for %s\n", + spi->modalias); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->spi = spi; + chip->ops = spi->controller_data; + + if (chip->ops && chip->ops->setup) { + int ret = chip->ops->setup(spi); + if (ret) { + kfree(chip); + return ret; + } + } + + spi_set_ctldata(spi, chip); + } + + if (spi->max_speed_hz != chip->rate) { + int err; + + err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); + if (err != 0) { + spi_set_ctldata(spi, NULL); + kfree(chip); + return err; + } + chip->rate = spi->max_speed_hz; + } + + chip->dss = bits_per_word_to_dss(spi->bits_per_word); + + ep93xx_spi_cs_control(spi, false); + return 0; +} + +/** + * ep93xx_spi_transfer() - queue message to be transferred + * @spi: target SPI device + * @msg: message to be transferred + * + * This function is called by SPI device drivers when they are going to transfer + * a new message. It simply puts the message in the queue and schedules + * workqueue to perform the actual transfer later on. + * + * Returns %0 on success and negative error in case of failure. 
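Not from the patch, purely illustrative: this is how a protocol driver would hand a message to the transfer hook below through the generic SPI core. The command byte and the helper name are hypothetical, and the buffers are kept trivially small.

	#include <linux/spi/spi.h>

	static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
	{
		u8 cmd = 0x9f;				/* hypothetical "read ID" opcode */
		struct spi_transfer xfers[] = {
			{ .tx_buf = &cmd, .len = 1 },
			{ .rx_buf = id,   .len = len },
		};
		struct spi_message msg;

		spi_message_init(&msg);
		spi_message_add_tail(&xfers[0], &msg);
		spi_message_add_tail(&xfers[1], &msg);

		/* spi_sync() queues the message; the controller's transfer() hook runs it */
		return spi_sync(spi, &msg);
	}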
+ */ +static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + + if (!msg || !msg->complete) + return -EINVAL; + + /* first validate each transfer */ + list_for_each_entry(t, &msg->transfers, transfer_list) { + if (t->bits_per_word) { + if (t->bits_per_word < 4 || t->bits_per_word > 16) + return -EINVAL; + } + if (t->speed_hz && t->speed_hz < espi->min_rate) + return -EINVAL; + } + + /* + * Now that we own the message, let's initialize it so that it is + * suitable for us. We use @msg->status to signal whether there was + * error in transfer and @msg->state is used to hold pointer to the + * current transfer (or %NULL if no active current transfer). + */ + msg->state = NULL; + msg->status = 0; + msg->actual_length = 0; + + spin_lock_irqsave(&espi->lock, flags); + if (!espi->running) { + spin_unlock_irqrestore(&espi->lock, flags); + return -ESHUTDOWN; + } + list_add_tail(&msg->queue, &espi->msg_queue); + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irqrestore(&espi->lock, flags); + + return 0; +} + +/** + * ep93xx_spi_cleanup() - cleans up master controller specific state + * @spi: SPI device to cleanup + * + * This function releases master controller specific state for given @spi + * device. + */ +static void ep93xx_spi_cleanup(struct spi_device *spi) +{ + struct ep93xx_spi_chip *chip; + + chip = spi_get_ctldata(spi); + if (chip) { + if (chip->ops && chip->ops->cleanup) + chip->ops->cleanup(spi); + spi_set_ctldata(spi, NULL); + kfree(chip); + } +} + +/** + * ep93xx_spi_chip_setup() - configures hardware according to given @chip + * @espi: ep93xx SPI controller struct + * @chip: chip specific settings + * + * This function sets up the actual hardware registers with settings given in + * @chip. Note that no validation is done so make sure that callers validate + * settings before calling this. + */ +static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, + const struct ep93xx_spi_chip *chip) +{ + u16 cr0; + + cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; + cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; + cr0 |= chip->dss; + + dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", + chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); + dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); + + ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); + ep93xx_spi_write_u16(espi, SSPCR0, cr0); +} + +static inline int bits_per_word(const struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + return t->bits_per_word ? 
t->bits_per_word : msg->spi->bits_per_word; +} + +static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u16 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u16(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } else { + u8 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u8 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u8(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } +} + +static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 rx_val; + + rx_val = ep93xx_spi_read_u16(espi, SSPDR); + if (t->rx_buf) + ((u16 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } else { + u8 rx_val; + + rx_val = ep93xx_spi_read_u8(espi, SSPDR); + if (t->rx_buf) + ((u8 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } +} + +/** + * ep93xx_spi_read_write() - perform next RX/TX transfer + * @espi: ep93xx SPI controller struct + * + * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If + * called several times, the whole transfer will be completed. Returns + * %-EINPROGRESS when current transfer was not yet completed otherwise %0. + * + * When this function is finished, RX FIFO should be empty and TX FIFO should be + * full. + */ +static int ep93xx_spi_read_write(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + /* read as long as RX FIFO has frames in it */ + while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { + ep93xx_do_read(espi, t); + espi->fifo_level--; + } + + /* write as long as TX FIFO has room */ + while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { + ep93xx_do_write(espi, t); + espi->fifo_level++; + } + + if (espi->rx == t->len) { + msg->actual_length += t->len; + return 0; + } + + return -EINPROGRESS; +} + +/** + * ep93xx_spi_process_transfer() - processes one SPI transfer + * @espi: ep93xx SPI controller struct + * @msg: current message + * @t: transfer to process + * + * This function processes one SPI transfer given in @t. Function waits until + * transfer is complete (may sleep) and updates @msg->status based on whether + * transfer was succesfully processed or not. + */ +static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, + struct spi_message *msg, + struct spi_transfer *t) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); + + msg->state = t; + + /* + * Handle any transfer specific settings if needed. We use + * temporary chip settings here and restore original later when + * the transfer is finished. + */ + if (t->speed_hz || t->bits_per_word) { + struct ep93xx_spi_chip tmp_chip = *chip; + + if (t->speed_hz) { + int err; + + err = ep93xx_spi_calc_divisors(espi, &tmp_chip, + t->speed_hz); + if (err) { + dev_err(&espi->pdev->dev, + "failed to adjust speed\n"); + msg->status = err; + return; + } + } + + if (t->bits_per_word) + tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); + + /* + * Set up temporary new hw settings for this transfer. + */ + ep93xx_spi_chip_setup(espi, &tmp_chip); + } + + espi->rx = 0; + espi->tx = 0; + + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. 
+ */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } + + /* + * In case of error during transmit, we bail out from processing + * the message. + */ + if (msg->status) + return; + + /* + * After this transfer is finished, perform any possible + * post-transfer actions requested by the protocol driver. + */ + if (t->delay_usecs) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(t->delay_usecs)); + } + if (t->cs_change) { + if (!list_is_last(&t->transfer_list, &msg->transfers)) { + /* + * In case protocol driver is asking us to drop the + * chipselect briefly, we let the scheduler to handle + * any "delay" here. + */ + ep93xx_spi_cs_control(msg->spi, false); + cond_resched(); + ep93xx_spi_cs_control(msg->spi, true); + } + } + + if (t->speed_hz || t->bits_per_word) + ep93xx_spi_chip_setup(espi, chip); +} + +/* + * ep93xx_spi_process_message() - process one SPI message + * @espi: ep93xx SPI controller struct + * @msg: message to process + * + * This function processes a single SPI message. We go through all transfers in + * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is + * asserted during the whole message (unless per transfer cs_change is set). + * + * @msg->status contains %0 in case of success or negative error code in case of + * failure. + */ +static void ep93xx_spi_process_message(struct ep93xx_spi *espi, + struct spi_message *msg) +{ + unsigned long timeout; + struct spi_transfer *t; + int err; + + /* + * Enable the SPI controller and its clock. + */ + err = ep93xx_spi_enable(espi); + if (err) { + dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); + msg->status = err; + return; + } + + /* + * Just to be sure: flush any data from RX FIFO. + */ + timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); + while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { + if (time_after(jiffies, timeout)) { + dev_warn(&espi->pdev->dev, + "timeout while flushing RX FIFO\n"); + msg->status = -ETIMEDOUT; + return; + } + ep93xx_spi_read_u16(espi, SSPDR); + } + + /* + * We explicitly handle FIFO level. This way we don't have to check TX + * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. + */ + espi->fifo_level = 0; + + /* + * Update SPI controller registers according to spi device and assert + * the chipselect. + */ + ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); + ep93xx_spi_cs_control(msg->spi, true); + + list_for_each_entry(t, &msg->transfers, transfer_list) { + ep93xx_spi_process_transfer(espi, msg, t); + if (msg->status) + break; + } + + /* + * Now the whole message is transferred (or failed for some reason). We + * deselect the device and disable the SPI controller. + */ + ep93xx_spi_cs_control(msg->spi, false); + ep93xx_spi_disable(espi); +} + +#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) + +/** + * ep93xx_spi_work() - EP93xx SPI workqueue worker function + * @work: work struct + * + * Workqueue worker function. This function is called when there are new + * SPI messages to be processed. Message is taken out from the queue and then + * passed to ep93xx_spi_process_message(). + * + * After message is transferred, protocol driver is notified by calling + * @msg->complete(). In case of error, @msg->status is set to negative error + * number, otherwise it contains zero (and @msg->actual_length is updated). 
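An illustrative counterpart from the protocol-driver side (not part of the patch): the @msg->complete() call described above is what an asynchronous submitter typically waits on. A hedged sketch using spi_async() with a private completion; the structure and function names are made up.

	#include <linux/completion.h>
	#include <linux/spi/spi.h>

	struct example_ctx {
		struct completion done;
	};

	static void example_complete(void *context)
	{
		struct example_ctx *ctx = context;

		/* called from the worker once msg->status and msg->actual_length are set */
		complete(&ctx->done);
	}

	static int example_async_xfer(struct spi_device *spi, struct spi_message *msg)
	{
		struct example_ctx ctx;
		int ret;

		init_completion(&ctx.done);
		msg->complete = example_complete;
		msg->context = &ctx;

		ret = spi_async(spi, msg);	/* ends up in the controller's transfer() hook */
		if (ret)
			return ret;

		wait_for_completion(&ctx.done);
		return msg->status;
	}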
+ */ +static void ep93xx_spi_work(struct work_struct *work) +{ + struct ep93xx_spi *espi = work_to_espi(work); + struct spi_message *msg; + + spin_lock_irq(&espi->lock); + if (!espi->running || espi->current_msg || + list_empty(&espi->msg_queue)) { + spin_unlock_irq(&espi->lock); + return; + } + msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); + list_del_init(&msg->queue); + espi->current_msg = msg; + spin_unlock_irq(&espi->lock); + + ep93xx_spi_process_message(espi, msg); + + /* + * Update the current message and re-schedule ourselves if there are + * more messages in the queue. + */ + spin_lock_irq(&espi->lock); + espi->current_msg = NULL; + if (espi->running && !list_empty(&espi->msg_queue)) + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irq(&espi->lock); + + /* notify the protocol driver that we are done with this message */ + msg->complete(msg->context); +} + +static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) +{ + struct ep93xx_spi *espi = dev_id; + u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); + + /* + * If we got ROR (receive overrun) interrupt we know that something is + * wrong. Just abort the message. + */ + if (unlikely(irq_status & SSPIIR_RORIS)) { + /* clear the overrun interrupt */ + ep93xx_spi_write_u8(espi, SSPICR, 0); + dev_warn(&espi->pdev->dev, + "receive overrun, aborting the message\n"); + espi->current_msg->status = -EIO; + } else { + /* + * Interrupt is either RX (RIS) or TX (TIS). For both cases we + * simply execute next data transfer. + */ + if (ep93xx_spi_read_write(espi)) { + /* + * In normal case, there still is some processing left + * for current transfer. Let's wait for the next + * interrupt then. + */ + return IRQ_HANDLED; + } + } + + /* + * Current transfer is finished, either with error or with success. In + * any case we disable interrupts and notify the worker to handle + * any post-processing of the message. + */ + ep93xx_spi_disable_interrupts(espi); + complete(&espi->wait); + return IRQ_HANDLED; +} + +static int __init ep93xx_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct ep93xx_spi_info *info; + struct ep93xx_spi *espi; + struct resource *res; + int error; + + info = pdev->dev.platform_data; + + master = spi_alloc_master(&pdev->dev, sizeof(*espi)); + if (!master) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + return -ENOMEM; + } + + master->setup = ep93xx_spi_setup; + master->transfer = ep93xx_spi_transfer; + master->cleanup = ep93xx_spi_cleanup; + master->bus_num = pdev->id; + master->num_chipselect = info->num_chipselect; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + platform_set_drvdata(pdev, master); + + espi = spi_master_get_devdata(master); + + espi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(espi->clk)) { + dev_err(&pdev->dev, "unable to get spi clock\n"); + error = PTR_ERR(espi->clk); + goto fail_release_master; + } + + spin_lock_init(&espi->lock); + init_completion(&espi->wait); + + /* + * Calculate maximum and minimum supported clock rates + * for the controller. 
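A quick numeric illustration (not in the patch), assuming a hypothetical 14.7456 MHz controller clock: the limits computed just below come out to roughly 7.37 MHz and 226 Hz.

	#include <stdio.h>

	int main(void)
	{
		unsigned long clk = 14745600;	/* hypothetical controller clock, in Hz */

		/* same expressions as espi->max_rate and espi->min_rate below */
		printf("max %lu Hz, min %lu Hz\n", clk / 2, clk / (254 * 256));
		return 0;	/* prints: max 7372800 Hz, min 226 Hz */
	}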
+ */ + espi->max_rate = clk_get_rate(espi->clk) / 2; + espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); + espi->pdev = pdev; + + espi->irq = platform_get_irq(pdev, 0); + if (espi->irq < 0) { + error = -EBUSY; + dev_err(&pdev->dev, "failed to get irq resources\n"); + goto fail_put_clock; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "unable to get iomem resource\n"); + error = -ENODEV; + goto fail_put_clock; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, "unable to request iomem resources\n"); + error = -EBUSY; + goto fail_put_clock; + } + + espi->regs_base = ioremap(res->start, resource_size(res)); + if (!espi->regs_base) { + dev_err(&pdev->dev, "failed to map resources\n"); + error = -ENODEV; + goto fail_free_mem; + } + + error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, + "ep93xx-spi", espi); + if (error) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto fail_unmap_regs; + } + + espi->wq = create_singlethread_workqueue("ep93xx_spid"); + if (!espi->wq) { + dev_err(&pdev->dev, "unable to create workqueue\n"); + goto fail_free_irq; + } + INIT_WORK(&espi->msg_work, ep93xx_spi_work); + INIT_LIST_HEAD(&espi->msg_queue); + espi->running = true; + + /* make sure that the hardware is disabled */ + ep93xx_spi_write_u8(espi, SSPCR1, 0); + + error = spi_register_master(master); + if (error) { + dev_err(&pdev->dev, "failed to register SPI master\n"); + goto fail_free_queue; + } + + dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", + (unsigned long)res->start, espi->irq); + + return 0; + +fail_free_queue: + destroy_workqueue(espi->wq); +fail_free_irq: + free_irq(espi->irq, espi); +fail_unmap_regs: + iounmap(espi->regs_base); +fail_free_mem: + release_mem_region(res->start, resource_size(res)); +fail_put_clock: + clk_put(espi->clk); +fail_release_master: + spi_master_put(master); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static int __exit ep93xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct ep93xx_spi *espi = spi_master_get_devdata(master); + struct resource *res; + + spin_lock_irq(&espi->lock); + espi->running = false; + spin_unlock_irq(&espi->lock); + + destroy_workqueue(espi->wq); + + /* + * Complete remaining messages with %-ESHUTDOWN status. 
+ */ + spin_lock_irq(&espi->lock); + while (!list_empty(&espi->msg_queue)) { + struct spi_message *msg; + + msg = list_first_entry(&espi->msg_queue, + struct spi_message, queue); + list_del_init(&msg->queue); + msg->status = -ESHUTDOWN; + spin_unlock_irq(&espi->lock); + msg->complete(msg->context); + spin_lock_irq(&espi->lock); + } + spin_unlock_irq(&espi->lock); + + free_irq(espi->irq, espi); + iounmap(espi->regs_base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + clk_put(espi->clk); + platform_set_drvdata(pdev, NULL); + + spi_unregister_master(master); + return 0; +} + +static struct platform_driver ep93xx_spi_driver = { + .driver = { + .name = "ep93xx-spi", + .owner = THIS_MODULE, + }, + .remove = __exit_p(ep93xx_spi_remove), +}; + +static int __init ep93xx_spi_init(void) +{ + return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe); +} +module_init(ep93xx_spi_init); + +static void __exit ep93xx_spi_exit(void) +{ + platform_driver_unregister(&ep93xx_spi_driver); +} +module_exit(ep93xx_spi_exit); + +MODULE_DESCRIPTION("EP93xx SPI Controller driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-spi"); diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c new file mode 100644 index 00000000000..77d9e7ee8b2 --- /dev/null +++ b/drivers/spi/mpc512x_psc_spi.c @@ -0,0 +1,578 @@ +/* + * MPC512x PSC in SPI mode driver. + * + * Copyright (C) 2007,2008 Freescale Semiconductor Inc. + * Original port from 52xx driver: + * Hongjun Chen <hong-jun.chen@freescale.com> + * + * Fork of mpc52xx_psc_spi.c: + * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/spi/spi.h> +#include <linux/fsl_devices.h> +#include <asm/mpc52xx_psc.h> + +struct mpc512x_psc_spi { + void (*cs_control)(struct spi_device *spi, bool on); + u32 sysclk; + + /* driver internal data */ + struct mpc52xx_psc __iomem *psc; + struct mpc512x_psc_fifo __iomem *fifo; + unsigned int irq; + u8 bits_per_word; + u8 busy; + u32 mclk; + u8 eofbyte; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; /* Message queue lock */ + + struct completion done; +}; + +/* controller state */ +struct mpc512x_psc_spi_cs { + int bits_per_word; + int speed_hz; +}; + +/* set clock freq, clock ramp, bits per work + * if t is NULL then reset the values to the default values + */ +static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + + cs->speed_hz = (t && t->speed_hz) + ? t->speed_hz : spi->max_speed_hz; + cs->bits_per_word = (t && t->bits_per_word) + ? 
t->bits_per_word : spi->bits_per_word; + cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; + return 0; +} + +static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sicr = in_be32(&psc->sicr); + + /* Set clock phase and polarity */ + if (spi->mode & SPI_CPHA) + sicr |= 0x00001000; + else + sicr &= ~0x00001000; + + if (spi->mode & SPI_CPOL) + sicr |= 0x00002000; + else + sicr &= ~0x00002000; + + if (spi->mode & SPI_LSB_FIRST) + sicr |= 0x10000000; + else + sicr &= ~0x10000000; + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + if (cs->speed_hz) + bclkdiv = (mps->mclk / cs->speed_hz) - 1; + else + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + mps->bits_per_word = cs->bits_per_word; + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); +} + +static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); + +} + +/* extract and scale size field in txsz or rxsz */ +#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); + +#define EOFBYTE 1 + +static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + size_t len = t->len; + u8 *tx_buf = (u8 *)t->tx_buf; + u8 *rx_buf = (u8 *)t->rx_buf; + + if (!tx_buf && !rx_buf && t->len) + return -EINVAL; + + /* Zero MR2 */ + in_8(&psc->mode); + out_8(&psc->mode, 0x0); + + while (len) { + int count; + int i; + u8 data; + size_t fifosz; + int rxcount; + + /* + * The number of bytes that can be sent at a time + * depends on the fifo size. + */ + fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); + count = min(fifosz, len); + + for (i = count; i > 0; i--) { + data = tx_buf ? 
*tx_buf++ : 0; + if (len == EOFBYTE) + setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); + out_8(&fifo->txdata_8, data); + len--; + } + + INIT_COMPLETION(mps->done); + + /* interrupt on tx fifo empty */ + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); + + /* enable transmiter/receiver */ + out_8(&psc->command, + MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); + + wait_for_completion(&mps->done); + + mdelay(1); + + /* rx fifo should have count bytes in it */ + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) + mdelay(1); + + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) { + dev_warn(&spi->dev, "expected %d bytes in rx fifo " + "but got %d\n", count, rxcount); + } + + rxcount = min(rxcount, count); + for (i = rxcount; i > 0; i--) { + data = in_8(&fifo->rxdata_8); + if (rx_buf) + *rx_buf++ = data; + } + while (in_be32(&fifo->rxcnt)) { + in_8(&fifo->rxdata_8); + } + + out_8(&psc->command, + MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + } + /* disable transmiter/receiver and fifo interrupt */ + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + out_be32(&fifo->tximr, 0); + return 0; +} + +static void mpc512x_psc_spi_work(struct work_struct *work) +{ + struct mpc512x_psc_spi *mps = container_of(work, + struct mpc512x_psc_spi, + work); + + spin_lock_irq(&mps->lock); + mps->busy = 1; + while (!list_empty(&mps->queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + unsigned cs_change; + int status; + + m = container_of(mps->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irq(&mps->lock); + + spi = m->spi; + cs_change = 1; + status = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = mpc512x_psc_spi_transfer_setup(spi, t); + if (status < 0) + break; + } + + if (cs_change) + mpc512x_psc_spi_activate_cs(spi); + cs_change = t->cs_change; + + status = mpc512x_psc_spi_transfer_rxtx(spi, t); + if (status) + break; + m->actual_length += t->len; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + } + + m->status = status; + m->complete(m->context); + + if (status || !cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + + mpc512x_psc_spi_transfer_setup(spi, NULL); + + spin_lock_irq(&mps->lock); + } + mps->busy = 0; + spin_unlock_irq(&mps->lock); +} + +static int mpc512x_psc_spi_setup(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + unsigned long flags; + + if (spi->bits_per_word % 8) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + cs->bits_per_word = spi->bits_per_word; + cs->speed_hz = spi->max_speed_hz; + + spin_lock_irqsave(&mps->lock, flags); + if (!mps->busy) + mpc512x_psc_spi_deactivate_cs(spi); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static int mpc512x_psc_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mps->lock, flags); + list_add_tail(&m->queue, &mps->queue); + queue_work(mps->workqueue, &mps->work); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static void mpc512x_psc_spi_cleanup(struct spi_device 
*spi) +{ + kfree(spi->controller_state); +} + +static int mpc512x_psc_spi_port_config(struct spi_master *master, + struct mpc512x_psc_spi *mps) +{ + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + struct clk *spiclk; + int ret = 0; + char name[32]; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sprintf(name, "psc%d_mclk", master->bus_num); + spiclk = clk_get(&master->dev, name); + clk_enable(spiclk); + mps->mclk = clk_get_rate(spiclk); + clk_put(spiclk); + + /* Reset the PSC into a known state */ + out_8(&psc->command, MPC52xx_PSC_RST_RX); + out_8(&psc->command, MPC52xx_PSC_RST_TX); + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + + /* Disable psc interrupts all useful interrupts are in fifo */ + out_be16(&psc->isr_imr.imr, 0); + + /* Disable fifo interrupts, will be enabled later */ + out_be32(&fifo->tximr, 0); + out_be32(&fifo->rximr, 0); + + /* Setup fifo slice address and size */ + /*out_be32(&fifo->txsz, 0x0fe00004);*/ + /*out_be32(&fifo->rxsz, 0x0ff00004);*/ + + sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ + 0x00800000 | /* GenClk = 1 -- internal clk */ + 0x00008000 | /* SPI = 1 */ + 0x00004000 | /* MSTR = 1 -- SPI master */ + 0x00000800; /* UseEOF = 1 -- SS low until EOF */ + + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + + /* Set 2ms DTL delay */ + out_8(&psc->ctur, 0x00); + out_8(&psc->ctlr, 0x82); + + /* we don't use the alarms */ + out_be32(&fifo->rxalarm, 0xfff); + out_be32(&fifo->txalarm, 0); + + /* Enable FIFO slices for Rx/Tx */ + out_be32(&fifo->rxcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + out_be32(&fifo->txcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + + mps->bits_per_word = 8; + + return ret; +} + +static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) +{ + struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + + /* clear interrupt and wake up the work queue */ + if (in_be32(&fifo->txisr) & + in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, 0); + complete(&mps->done); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* bus_num is used only for the case dev->platform_data == NULL */ +static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, + s16 bus_num) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc512x_psc_spi *mps; + struct spi_master *master; + int ret; + void *tempp; + + master = spi_alloc_master(dev, sizeof *mps); + if (master == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, master); + mps = spi_master_get_devdata(master); + mps->irq = irq; + + if (pdata == NULL) { + dev_err(dev, "probe called without platform data, no " + "cs_control function will be called\n"); + mps->cs_control = NULL; + mps->sysclk = 0; + master->bus_num = bus_num; + master->num_chipselect = 255; + } else { + mps->cs_control = pdata->cs_control; + mps->sysclk = pdata->sysclk; + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + } + + master->setup = mpc512x_psc_spi_setup; + master->transfer = mpc512x_psc_spi_transfer; + master->cleanup = mpc512x_psc_spi_cleanup; + master->dev.of_node = dev->of_node; + + tempp = 
ioremap(regaddr, size); + if (!tempp) { + dev_err(dev, "could not ioremap I/O port range\n"); + ret = -EFAULT; + goto free_master; + } + mps->psc = tempp; + mps->fifo = + (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); + + ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, + "mpc512x-psc-spi", mps); + if (ret) + goto free_master; + + ret = mpc512x_psc_spi_port_config(master, mps); + if (ret < 0) + goto free_irq; + + spin_lock_init(&mps->lock); + init_completion(&mps->done); + INIT_WORK(&mps->work, mpc512x_psc_spi_work); + INIT_LIST_HEAD(&mps->queue); + + mps->workqueue = + create_singlethread_workqueue(dev_name(master->dev.parent)); + if (mps->workqueue == NULL) { + ret = -EBUSY; + goto free_irq; + } + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + return ret; + +unreg_master: + destroy_workqueue(mps->workqueue); +free_irq: + free_irq(mps->irq, mps); +free_master: + if (mps->psc) + iounmap(mps->psc); + spi_master_put(master); + + return ret; +} + +static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; +} + +static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op, + const struct of_device_id *match) +{ + const u32 *regaddr_p; + u64 regaddr64, size64; + s16 id = -1; + + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); + if (!regaddr_p) { + dev_err(&op->dev, "Invalid PSC address\n"); + return -EINVAL; + } + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); + + /* get PSC id (0..11, used by port_config) */ + if (op->dev.platform_data == NULL) { + const u32 *psc_nump; + + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); + if (!psc_nump || *psc_nump > 11) { + dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " + "has invalid cell-index property\n", + op->dev.of_node->full_name); + return -EINVAL; + } + id = *psc_nump; + } + + return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, + irq_of_parse_and_map(op->dev.of_node, 0), id); +} + +static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op) +{ + return mpc512x_psc_spi_do_remove(&op->dev); +} + +static struct of_device_id mpc512x_psc_spi_of_match[] = { + { .compatible = "fsl,mpc5121-psc-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); + +static struct of_platform_driver mpc512x_psc_spi_of_driver = { + .probe = mpc512x_psc_spi_of_probe, + .remove = __devexit_p(mpc512x_psc_spi_of_remove), + .driver = { + .name = "mpc512x-psc-spi", + .owner = THIS_MODULE, + .of_match_table = mpc512x_psc_spi_of_match, + }, +}; + +static int __init mpc512x_psc_spi_init(void) +{ + return of_register_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_init(mpc512x_psc_spi_init); + +static void __exit mpc512x_psc_spi_exit(void) +{ + of_unregister_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_exit(mpc512x_psc_spi_exit); + +MODULE_AUTHOR("John Rigby"); +MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index f50c81df336..983fbbfce76 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -16,14 +16,15 @@ #include <linux/types.h> #include <linux/errno.h> 
#include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_spi.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/fsl_devices.h> +#include <linux/slab.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> @@ -397,6 +398,7 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, master->setup = mpc52xx_psc_spi_setup; master->transfer = mpc52xx_psc_spi_transfer; master->cleanup = mpc52xx_psc_spi_cleanup; + master->dev.of_node = dev->of_node; mps->psc = ioremap(regaddr, size); if (!mps->psc) { @@ -463,26 +465,25 @@ static int __exit mpc52xx_psc_spi_do_remove(struct device *dev) return 0; } -static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, +static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op, const struct of_device_id *match) { const u32 *regaddr_p; u64 regaddr64, size64; s16 id = -1; - int rc; - regaddr_p = of_get_address(op->node, 0, &size64, NULL); + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); if (!regaddr_p) { dev_err(&op->dev, "Invalid PSC address\n"); return -EINVAL; } - regaddr64 = of_translate_address(op->node, regaddr_p); + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); /* get PSC id (1..6, used by port_config) */ if (op->dev.platform_data == NULL) { const u32 *psc_nump; - psc_nump = of_get_property(op->node, "cell-index", NULL); + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); if (!psc_nump || *psc_nump > 5) { dev_err(&op->dev, "Invalid cell-index property\n"); return -EINVAL; @@ -490,20 +491,16 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, id = *psc_nump + 1; } - rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, - irq_of_parse_and_map(op->node, 0), id); - if (rc == 0) - of_register_spi_devices(dev_get_drvdata(&op->dev), op->node); - - return rc; + return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, + irq_of_parse_and_map(op->dev.of_node, 0), id); } -static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) +static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op) { return mpc52xx_psc_spi_do_remove(&op->dev); } -static struct of_device_id mpc52xx_psc_spi_of_match[] = { +static const struct of_device_id mpc52xx_psc_spi_of_match[] = { { .compatible = "fsl,mpc5200-psc-spi", }, { .compatible = "mpc5200-psc-spi", }, /* old */ {} @@ -512,14 +509,12 @@ static struct of_device_id mpc52xx_psc_spi_of_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); static struct of_platform_driver mpc52xx_psc_spi_of_driver = { - .owner = THIS_MODULE, - .name = "mpc52xx-psc-spi", - .match_table = mpc52xx_psc_spi_of_match, .probe = mpc52xx_psc_spi_of_probe, .remove = __exit_p(mpc52xx_psc_spi_of_remove), .driver = { .name = "mpc52xx-psc-spi", .owner = THIS_MODULE, + .of_match_table = mpc52xx_psc_spi_of_match, }, }; diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index 45bfe645817..ec9f0b1bf86 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c @@ -18,9 +18,9 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/spi/spi.h> -#include <linux/of_spi.h> #include <linux/io.h> #include <linux/of_gpio.h> +#include <linux/slab.h> #include <asm/time.h> #include <asm/mpc52xx.h> @@ -390,7 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) /* * OF Platform Bus Binding */ -static 
int __devinit mpc52xx_spi_probe(struct of_device *op, +static int __devinit mpc52xx_spi_probe(struct platform_device *op, const struct of_device_id *match) { struct spi_master *master; @@ -402,7 +402,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, /* MMIO registers */ dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); - regs = of_iomap(op->node, 0); + regs = of_iomap(op->dev.of_node, 0); if (!regs) return -ENODEV; @@ -438,17 +438,18 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, master->setup = mpc52xx_spi_setup; master->transfer = mpc52xx_spi_transfer; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->dev.of_node = op->dev.of_node; dev_set_drvdata(&op->dev, master); ms = spi_master_get_devdata(master); ms->master = master; ms->regs = regs; - ms->irq0 = irq_of_parse_and_map(op->node, 0); - ms->irq1 = irq_of_parse_and_map(op->node, 1); + ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); + ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); ms->state = mpc52xx_spi_fsmstate_idle; - ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); - ms->gpio_cs_count = of_gpio_count(op->node); + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); + ms->gpio_cs_count = of_gpio_count(op->dev.of_node); if (ms->gpio_cs_count > 0) { master->num_chipselect = ms->gpio_cs_count; ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), @@ -459,7 +460,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, } for (i = 0; i < ms->gpio_cs_count; i++) { - gpio_cs = of_get_gpio(op->node, i); + gpio_cs = of_get_gpio(op->dev.of_node, i); if (gpio_cs < 0) { dev_err(&op->dev, "could not parse the gpio field " @@ -511,7 +512,6 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, if (rc) goto err_register; - of_register_spi_devices(master, op->node); dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); return rc; @@ -530,7 +530,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, return rc; } -static int __devexit mpc52xx_spi_remove(struct of_device *op) +static int __devexit mpc52xx_spi_remove(struct platform_device *op) { struct spi_master *master = dev_get_drvdata(&op->dev); struct mpc52xx_spi *ms = spi_master_get_devdata(master); @@ -550,16 +550,18 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op) return 0; } -static struct of_device_id mpc52xx_spi_match[] __devinitdata = { +static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { { .compatible = "fsl,mpc5200-spi", }, {} }; MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); static struct of_platform_driver mpc52xx_spi_of_driver = { - .owner = THIS_MODULE, - .name = "mpc52xx-spi", - .match_table = mpc52xx_spi_match, + .driver = { + .name = "mpc52xx-spi", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_spi_match, + }, .probe = mpc52xx_spi_probe, .remove = __exit_p(mpc52xx_spi_remove), }; diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index bf5f95a1941..2a651e61bfb 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -32,12 +32,13 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <plat/dma.h> #include <plat/clock.h> - +#include <plat/mcspi.h> #define OMAP2_MCSPI_MAX_FREQ 48000000 @@ -112,7 +113,7 @@ struct omap2_mcspi_dma { /* use PIO for small transfers, avoiding DMA setup/teardown overhead and * cache operations; better heuristics consider wordsize and bitrate. 
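For orientation only (a sketch, not the driver's exact code): the heuristic described in this comment boils down to a length check against DMA_MIN_BYTES, which this patch raises from 8 to 160 bytes; the wrapper name below is hypothetical and the real decision is made inside omap2_mcspi_work().

	static unsigned example_txrx(struct spi_device *spi, struct spi_transfer *t)
	{
		/* only illustrates the threshold; error handling and DMA mapping omitted */
		if (t->len >= DMA_MIN_BYTES)
			return omap2_mcspi_txrx_dma(spi, t);	/* big enough to amortize DMA setup */

		return omap2_mcspi_txrx_pio(spi, t);		/* small transfer: PIO avoids the overhead */
	}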
*/ -#define DMA_MIN_BYTES 8 +#define DMA_MIN_BYTES 160 struct omap2_mcspi { @@ -203,6 +204,7 @@ static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) cs->chconf0 = val; mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); } static void omap2_mcspi_set_dma_req(const struct spi_device *spi, @@ -227,6 +229,8 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0; mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l); + /* Flash post-writes */ + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); } static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) @@ -292,6 +296,19 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) return 0; } +static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(__raw_readl(reg) & bit)) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + return 0; +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { @@ -301,11 +318,17 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) unsigned int count, c; unsigned long base, tx_reg, rx_reg; int word_len, data_type, element_count; + int elements; + u32 l; u8 * rx; const u8 * tx; + void __iomem *chstat_reg; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + l = mcspi_cached_chconf0(spi); + + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; count = xfer->len; c = count; @@ -344,8 +367,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) } if (rx != NULL) { + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) + elements--; + omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, element_count - 1, 1, + data_type, elements, 1, OMAP_DMA_SYNC_ELEMENT, mcspi_dma->dma_rx_sync_dev, 1); @@ -371,23 +398,58 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (tx != NULL) { wait_for_completion(&mcspi_dma->dma_tx_completion); dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); + + /* for TX_ONLY mode, be sure all words have shifted out */ + if (rx == NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) + dev_err(&spi->dev, "TXS timed out\n"); + else if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_EOT) < 0) + dev_err(&spi->dev, "EOT timed out\n"); + } } if (rx != NULL) { wait_for_completion(&mcspi_dma->dma_rx_completion); dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); omap2_mcspi_set_enable(spi, 0); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[elements++] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[elements++] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[elements++] = w; + } else { + dev_err(&spi->dev, + "DMA RX penultimate word empty"); + count -= (word_len <= 8) ? 2 : + (word_len <= 16) ? 
4 : + /* word_len <= 32 */ 8; + omap2_mcspi_set_enable(spi, 1); + return count; + } + } + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) & OMAP2_MCSPI_CHSTAT_RXS)) { u32 w; w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); if (word_len <= 8) - ((u8 *)xfer->rx_buf)[element_count - 1] = w; + ((u8 *)xfer->rx_buf)[elements] = w; else if (word_len <= 16) - ((u16 *)xfer->rx_buf)[element_count - 1] = w; + ((u16 *)xfer->rx_buf)[elements] = w; else /* word_len <= 32 */ - ((u32 *)xfer->rx_buf)[element_count - 1] = w; + ((u32 *)xfer->rx_buf)[elements] = w; } else { dev_err(&spi->dev, "DMA RX last word empty"); count -= (word_len <= 8) ? 1 : @@ -399,19 +461,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) return count; } -static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) -{ - unsigned long timeout; - - timeout = jiffies + msecs_to_jiffies(1000); - while (!(__raw_readl(reg) & bit)) { - if (time_after(jiffies, timeout)) - return -1; - cpu_relax(); - } - return 0; -} - static unsigned omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) { @@ -431,7 +480,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) word_len = cs->word_len; l = mcspi_cached_chconf0(spi); - l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; /* We store the pre-calculated register addresses on stack to speed * up the transfer loop. */ @@ -454,10 +502,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %02x\n", + dev_vdbg(&spi->dev, "write-%d %02x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -466,16 +512,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 1 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %02x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %02x\n", + dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); -#endif } } while (c); } else if (word_len <= 16) { @@ -492,10 +549,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %04x\n", + dev_vdbg(&spi->dev, "write-%d %04x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -504,16 +559,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 2 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %04x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; 
+ } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); -#endif } } while (c); } else if (word_len <= 32) { @@ -530,10 +596,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %04x\n", + dev_vdbg(&spi->dev, "write-%d %08x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -542,16 +606,27 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 4 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %08x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %08x\n", word_len, *(rx - 1)); -#endif } } while (c); } @@ -564,8 +639,15 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } else if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_EOT) < 0) dev_err(&spi->dev, "EOT timed out\n"); + + /* disable chan to purge rx datas received in TX_ONLY transfer, + * otherwise these rx datas will affect the direct following + * RX_ONLY transfer. 
+ */ + omap2_mcspi_set_enable(spi, 0); } out: + omap2_mcspi_set_enable(spi, 1); return count - c; } @@ -578,6 +660,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, struct spi_master *spi_cntrl; u32 l = 0, div = 0; u8 word_len = spi->bits_per_word; + u32 speed_hz = spi->max_speed_hz; mcspi = spi_master_get_devdata(spi->master); spi_cntrl = mcspi->master; @@ -587,9 +670,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, cs->word_len = word_len; - if (spi->max_speed_hz) { + if (t && t->speed_hz) + speed_hz = t->speed_hz; + + if (speed_hz) { while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) - > spi->max_speed_hz) + > speed_hz) div++; } else div = 15; @@ -749,21 +835,26 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) struct omap2_mcspi_cs *cs; mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - /* Unlink controller state from context save list */ - cs = spi->controller_state; - list_del(&cs->node); - - kfree(spi->controller_state); + if (spi->controller_state) { + /* Unlink controller state from context save list */ + cs = spi->controller_state; + list_del(&cs->node); - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + kfree(spi->controller_state); } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + + if (spi->chip_select < spi->master->num_chipselect) { + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + if (mcspi_dma->dma_rx_channel != -1) { + omap_free_dma(mcspi_dma->dma_rx_channel); + mcspi_dma->dma_rx_channel = -1; + } + if (mcspi_dma->dma_tx_channel != -1) { + omap_free_dma(mcspi_dma->dma_tx_channel); + mcspi_dma->dma_tx_channel = -1; + } } } @@ -789,6 +880,7 @@ static void omap2_mcspi_work(struct work_struct *work) struct spi_transfer *t = NULL; int cs_active = 0; struct omap2_mcspi_cs *cs; + struct omap2_mcspi_device_config *cd; int par_override = 0; int status = 0; u32 chconf; @@ -801,6 +893,7 @@ static void omap2_mcspi_work(struct work_struct *work) spi = m->spi; cs = spi->controller_state; + cd = spi->controller_data; omap2_mcspi_set_enable(spi, 1); list_for_each_entry(t, &m->transfers, transfer_list) { @@ -824,10 +917,19 @@ static void omap2_mcspi_work(struct work_struct *work) chconf = mcspi_cached_chconf0(spi); chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; + chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; + if (t->tx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; else if (t->rx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; + + if (cd && cd->turbo_mode && t->tx_buf == NULL) { + /* Turbo mode is for more than one word */ + if (t->len > ((cs->word_len + 7) >> 3)) + chconf |= OMAP2_MCSPI_CHCONF_TURBO; + } + mcspi_write_chconf0(spi, chconf); if (t->len) { @@ -1014,7 +1116,7 @@ static u8 __initdata spi2_txdma_id[] = { OMAP24XX_DMA_SPI2_TX1, }; -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) \ +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ || defined(CONFIG_ARCH_OMAP4) static u8 __initdata spi3_rxdma_id[] = { OMAP24XX_DMA_SPI3_RX0, diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c index 5355d90d1be..9bd1c92ad96 100644 --- a/drivers/spi/omap_spi_100k.c +++ b/drivers/spi/omap_spi_100k.c @@ -33,6 +33,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> +#include <linux/slab.h> #include <linux/spi/spi.h> @@ -140,7 +141,12 @@ static void spi100k_write_data(struct 
spi_master *master, int len, int data) { struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - /* write 16-bit word */ + /* write 16-bit word, shifting 8-bit data if necessary */ + if (len <= 8) { + data <<= 8; + len = 16; + } + spi100k_enable_clock(master); writew( data , spi100k->base + SPI_TX_MSB); @@ -161,6 +167,10 @@ static int spi100k_read_data(struct spi_master *master, int len) int dataH,dataL; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + /* Always do at least 16 bits */ + if (len <= 8) + len = 16; + spi100k_enable_clock(master); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | @@ -213,10 +223,6 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) c = count; word_len = cs->word_len; - /* RX_ONLY mode needs dummy data in TX reg */ - if (xfer->tx_buf == NULL) - spi100k_write_data(spi->master,word_len, 0); - if (word_len <= 8) { u8 *rx; const u8 *tx; @@ -226,9 +232,9 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) do { c-=1; if (xfer->tx_buf != NULL) - spi100k_write_data(spi->master,word_len, *tx); + spi100k_write_data(spi->master, word_len, *tx++); if (xfer->rx_buf != NULL) - *rx = spi100k_read_data(spi->master,word_len); + *rx++ = spi100k_read_data(spi->master, word_len); } while(c); } else if (word_len <= 16) { u16 *rx; @@ -379,10 +385,6 @@ static void omap1_spi100k_work(struct work_struct *work) if (t->len) { unsigned count; - /* RX_ONLY mode needs dummy data in TX reg */ - if (t->tx_buf == NULL) - spi100k_write_data(spi->master, 8, 0); - count = omap1_spi100k_txrx_pio(spi, t); m->actual_length += count; diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index 6c3a8557db2..160d3266205 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c @@ -41,6 +41,7 @@ #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index 3aea50da7b2..0b677dc041a 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c @@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } - if ((t != NULL) && t->bits_per_word) + if (t->bits_per_word) bits_per_word = t->bits_per_word; if ((bits_per_word != 8) && (bits_per_word != 16)) { @@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } /*make sure buffer length is even when working in 16 bit mode*/ - if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { + if ((t->bits_per_word == 16) && (t->len & 1)) { dev_err(&spi->dev, "message rejected : " "odd data length (%d) while in 16 bit mode\n", diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index c2f707e5ce7..e76b1afafe0 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -29,14 +29,14 @@ #include <linux/delay.h> #include <linux/clk.h> #include <linux/gpio.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/delay.h> #include <mach/dma.h> -#include <mach/regs-ssp.h> -#include <mach/ssp.h> +#include <plat/ssp.h> #include <mach/pxa2xx_spi.h> MODULE_AUTHOR("Stephen Street"); @@ -1317,14 +1317,14 @@ static int setup(struct spi_device *spi) /* NOTE: PXA25x_SSP _could_ use external clocking ... 
*/ if (drv_data->ssp_type != PXA25x_SSP) dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) - / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - chip->enable_dma ? "DMA" : "PIO"); + clk_get_rate(ssp->clk) + / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), + chip->enable_dma ? "DMA" : "PIO"); else dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) / 2 - / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - chip->enable_dma ? "DMA" : "PIO"); + clk_get_rate(ssp->clk) / 2 + / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), + chip->enable_dma ? "DMA" : "PIO"); if (spi->bits_per_word <= 8) { chip->n_bytes = 1; @@ -1465,7 +1465,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) platform_info = dev->platform_data; - ssp = ssp_request(pdev->id, pdev->name); + ssp = pxa_ssp_request(pdev->id, pdev->name); if (ssp == NULL) { dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); return -ENODEV; @@ -1475,7 +1475,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); if (!master) { dev_err(&pdev->dev, "cannot alloc spi_master\n"); - ssp_free(ssp); + pxa_ssp_free(ssp); return -ENOMEM; } drv_data = spi_master_get_devdata(master); @@ -1557,7 +1557,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT), drv_data->ioaddr); - write_SSCR0(SSCR0_SerClkDiv(2) + write_SSCR0(SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8), drv_data->ioaddr); @@ -1604,7 +1604,7 @@ out_error_irq_alloc: out_error_master_alloc: spi_master_put(master); - ssp_free(ssp); + pxa_ssp_free(ssp); return status; } @@ -1648,7 +1648,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) free_irq(ssp->irq, drv_data); /* Release SSP */ - ssp_free(ssp); + pxa_ssp_free(ssp); /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b76f2468a84..b5a78a1f442 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -23,8 +23,11 @@ #include <linux/init.h> #include <linux/cache.h> #include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/slab.h> #include <linux/mod_devicetable.h> #include <linux/spi/spi.h> +#include <linux/of_spi.h> /* SPI bustype and spi_master class are registered after board init code @@ -40,7 +43,7 @@ static void spidev_release(struct device *dev) spi->master->cleanup(spi); spi_master_put(spi->master); - kfree(dev); + kfree(spi); } static ssize_t @@ -84,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) const struct spi_device *spi = to_spi_device(dev); const struct spi_driver *sdrv = to_spi_driver(drv); + /* Attempt an OF style match */ + if (of_driver_match_device(dev, drv)) + return 1; + if (sdrv->id_table) return !!spi_match_id(sdrv->id_table, spi); @@ -256,6 +263,7 @@ int spi_add_device(struct spi_device *spi) { static DEFINE_MUTEX(spi_add_lock); struct device *dev = spi->master->dev.parent; + struct device *d; int status; /* Chipselects are numbered 0..max; validate. 
*/ @@ -277,10 +285,11 @@ int spi_add_device(struct spi_device *spi) */ mutex_lock(&spi_add_lock); - if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)) - != NULL) { + d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); + if (d != NULL) { dev_err(dev, "chipselect %d already in use\n", spi->chip_select); + put_device(d); status = -EBUSY; goto done; } @@ -524,6 +533,10 @@ int spi_register_master(struct spi_master *master) dynamic = 1; } + spin_lock_init(&master->bus_lock_spinlock); + mutex_init(&master->bus_lock_mutex); + master->bus_lock_flag = 0; + /* register the device, then userspace will see it. * registration fails if the bus ID is in use. */ @@ -537,17 +550,18 @@ int spi_register_master(struct spi_master *master) /* populate children from any spi device tables */ scan_boardinfo(master); status = 0; + + /* Register devices from the device tree */ + of_register_spi_devices(master); done: return status; } EXPORT_SYMBOL_GPL(spi_register_master); -static int __unregister(struct device *dev, void *master_dev) +static int __unregister(struct device *dev, void *null) { - /* note: before about 2.6.14-rc1 this would corrupt memory: */ - if (dev != master_dev) - spi_unregister_device(to_spi_device(dev)); + spi_unregister_device(to_spi_device(dev)); return 0; } @@ -565,8 +579,7 @@ void spi_unregister_master(struct spi_master *master) { int dummy; - dummy = device_for_each_child(master->dev.parent, &master->dev, - __unregister); + dummy = device_for_each_child(&master->dev, NULL, __unregister); device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); @@ -663,6 +676,35 @@ int spi_setup(struct spi_device *spi) } EXPORT_SYMBOL_GPL(spi_setup); +static int __spi_async(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + + /* Half-duplex links include original MicroWire, and ones with + * only one data pin like SPI_3WIRE (switches direction) or where + * either MOSI or MISO is missing. They can also be caused by + * software limitations. + */ + if ((master->flags & SPI_MASTER_HALF_DUPLEX) + || (spi->mode & SPI_3WIRE)) { + struct spi_transfer *xfer; + unsigned flags = master->flags; + + list_for_each_entry(xfer, &message->transfers, transfer_list) { + if (xfer->rx_buf && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) + return -EINVAL; + } + } + + message->spi = spi; + message->status = -EINPROGRESS; + return master->transfer(spi, message); +} + /** * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged @@ -695,33 +737,68 @@ EXPORT_SYMBOL_GPL(spi_setup); int spi_async(struct spi_device *spi, struct spi_message *message) { struct spi_master *master = spi->master; + int ret; + unsigned long flags; - /* Half-duplex links include original MicroWire, and ones with - * only one data pin like SPI_3WIRE (switches direction) or where - * either MOSI or MISO is missing. They can also be caused by - * software limitations. 
- */ - if ((master->flags & SPI_MASTER_HALF_DUPLEX) - || (spi->mode & SPI_3WIRE)) { - struct spi_transfer *xfer; - unsigned flags = master->flags; + spin_lock_irqsave(&master->bus_lock_spinlock, flags); - list_for_each_entry(xfer, &message->transfers, transfer_list) { - if (xfer->rx_buf && xfer->tx_buf) - return -EINVAL; - if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) - return -EINVAL; - if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) - return -EINVAL; - } - } + if (master->bus_lock_flag) + ret = -EBUSY; + else + ret = __spi_async(spi, message); - message->spi = spi; - message->status = -EINPROGRESS; - return master->transfer(spi, message); + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; } EXPORT_SYMBOL_GPL(spi_async); +/** + * spi_async_locked - version of spi_async with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) + * + * This call may be used in_irq and other contexts which can't sleep, + * as well as from task contexts which can sleep. + * + * The completion callback is invoked in a context which can't sleep. + * Before that invocation, the value of message->status is undefined. + * When the callback is issued, message->status holds either zero (to + * indicate complete success) or a negative error code. After that + * callback returns, the driver which issued the transfer request may + * deallocate the associated memory; it's no longer in use by any SPI + * core or controller driver code. + * + * Note that although all messages to a spi_device are handled in + * FIFO order, messages may go to different devices in other orders. + * Some device might be higher priority, or have various "hard" access + * time requirements, for example. + * + * On detection of any fault during the transfer, processing of + * the entire message is aborted, and the device is deselected. + * Until returning from the associated message completion callback, + * no other spi_message queued to that device will be processed. + * (This rule applies equally to all the synchronous transfer calls, + * which are wrappers around this core asynchronous primitive.) 
+ */ +int spi_async_locked(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + int ret; + unsigned long flags; + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + + ret = __spi_async(spi, message); + + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; + +} +EXPORT_SYMBOL_GPL(spi_async_locked); + /*-------------------------------------------------------------------------*/ @@ -735,6 +812,32 @@ static void spi_complete(void *arg) complete(arg); } +static int __spi_sync(struct spi_device *spi, struct spi_message *message, + int bus_locked) +{ + DECLARE_COMPLETION_ONSTACK(done); + int status; + struct spi_master *master = spi->master; + + message->complete = spi_complete; + message->context = &done; + + if (!bus_locked) + mutex_lock(&master->bus_lock_mutex); + + status = spi_async_locked(spi, message); + + if (!bus_locked) + mutex_unlock(&master->bus_lock_mutex); + + if (status == 0) { + wait_for_completion(&done); + status = message->status; + } + message->context = NULL; + return status; +} + /** * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged @@ -758,21 +861,86 @@ static void spi_complete(void *arg) */ int spi_sync(struct spi_device *spi, struct spi_message *message) { - DECLARE_COMPLETION_ONSTACK(done); - int status; - - message->complete = spi_complete; - message->context = &done; - status = spi_async(spi, message); - if (status == 0) { - wait_for_completion(&done); - status = message->status; - } - message->context = NULL; - return status; + return __spi_sync(spi, message, 0); } EXPORT_SYMBOL_GPL(spi_sync); +/** + * spi_sync_locked - version of spi_sync with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. Low-overhead controller + * drivers may DMA directly into and out of the message buffers. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must + * be released by a spi_bus_unlock call when the exclusive access is over. + * + * It returns zero on success, else a negative error code. + */ +int spi_sync_locked(struct spi_device *spi, struct spi_message *message) +{ + return __spi_sync(spi, message, 1); +} +EXPORT_SYMBOL_GPL(spi_sync_locked); + +/** + * spi_bus_lock - obtain a lock for exclusive SPI bus usage + * @master: SPI bus master that should be locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the + * exclusive access is over. Data transfer must be done by spi_sync_locked + * and spi_async_locked calls when the SPI bus lock is held. + * + * It returns zero on success, else a negative error code. 
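To illustrate the calling convention of the bus-lock primitives introduced here (a sketch, not part of this patch; the function and message names are hypothetical), a protocol driver that must keep two messages back to back on the bus would do roughly:

static int hypothetical_atomic_sequence(struct spi_device *spi,
					struct spi_message *cmd,
					struct spi_message *resp)
{
	struct spi_master *master = spi->master;
	int ret;

	/* other spi_async() callers now get -EBUSY; plain spi_sync() blocks until we unlock */
	spi_bus_lock(master);

	/* only the *_locked variants may touch the bus while the lock is held */
	ret = spi_sync_locked(spi, cmd);
	if (ret == 0)
		ret = spi_sync_locked(spi, resp);

	spi_bus_unlock(master);
	return ret;
}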
+ */ +int spi_bus_lock(struct spi_master *master) +{ + unsigned long flags; + + mutex_lock(&master->bus_lock_mutex); + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + master->bus_lock_flag = 1; + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + /* mutex remains locked until spi_bus_unlock is called */ + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_lock); + +/** + * spi_bus_unlock - release the lock for exclusive SPI bus usage + * @master: SPI bus master that was locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call releases an SPI bus lock previously obtained by an spi_bus_lock + * call. + * + * It returns zero on success, else a negative error code. + */ +int spi_bus_unlock(struct spi_master *master) +{ + master->bus_lock_flag = 0; + + mutex_unlock(&master->bus_lock_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_unlock); + /* portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 1d41058bbab..ab483a0ec6d 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -1,7 +1,7 @@ /* * Blackfin On-Chip SPI Driver * - * Copyright 2004-2007 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/slab.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> @@ -40,13 +41,16 @@ MODULE_LICENSE("GPL"); #define RUNNING_STATE ((void *)1) #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 -/* Value to send if no TX value is supplied */ -#define SPI_IDLE_TXVAL 0x0000 +struct bfin_spi_master_data; -struct driver_data { +struct bfin_spi_transfer_ops { + void (*write) (struct bfin_spi_master_data *); + void (*read) (struct bfin_spi_master_data *); + void (*duplex) (struct bfin_spi_master_data *); +}; + +struct bfin_spi_master_data { /* Driver model hookup */ struct platform_device *pdev; @@ -68,7 +72,7 @@ struct driver_data { spinlock_t lock; struct list_head queue; int busy; - int run; + bool running; /* Message Transfer pump */ struct tasklet_struct pump_transfers; @@ -76,7 +80,7 @@ struct driver_data { /* Current message transfer state info */ struct spi_message *cur_msg; struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; + struct bfin_spi_slave_data *cur_chip; size_t len_in_bytes; size_t len; void *tx; @@ -91,38 +95,37 @@ struct driver_data { dma_addr_t rx_dma; dma_addr_t tx_dma; + int irq_requested; + int spi_irq; + size_t rx_map_len; size_t tx_map_len; u8 n_bytes; + u16 ctrl_reg; + u16 flag_reg; + int cs_change; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data *); + const struct bfin_spi_transfer_ops *ops; }; -struct chip_data { +struct bfin_spi_slave_data { u16 ctl_reg; u16 baud; u16 flag; u8 chip_select_num; - u8 n_bytes; - u8 width; /* 0 or 1 */ u8 enable_dma; - u8 bits_per_word; /* 8 or 16 */ - u8 cs_change_per_word; u16 cs_chg_udelay; /* Some devices require > 255usec delay */ u32 cs_gpio; u16 idle_tx_val; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data *); + u8 pio_interrupt; /* use spi data irq */ + const struct 
bfin_spi_transfer_ops *ops; }; #define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(struct driver_data *drv_data) \ +static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ { return bfin_read16(drv_data->regs_base + off); } \ -static inline void write_##reg(struct driver_data *drv_data, u16 v) \ +static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ { bfin_write16(drv_data->regs_base + off, v); } DEFINE_SPI_REG(CTRL, 0x00) @@ -133,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10) DEFINE_SPI_REG(BAUD, 0x14) DEFINE_SPI_REG(SHAW, 0x18) -static void bfin_spi_enable(struct driver_data *drv_data) +static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -141,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data) write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); } -static void bfin_spi_disable(struct driver_data *drv_data) +static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -164,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz) return spi_baud; } -static int bfin_spi_flush(struct driver_data *drv_data) +static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; @@ -178,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data) } /* Chip select operation functions for cs_change flag */ -static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag |= chip->flag; - flag &= ~(chip->flag << 8); + flag &= ~chip->flag; write_FLAG(drv_data, flag); } else { @@ -192,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c } } -static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag &= ~chip->flag; - flag |= (chip->flag << 8); + flag |= chip->flag; write_FLAG(drv_data, flag); } else { @@ -210,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data udelay(chip->cs_chg_udelay); } +/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ +static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag |= (chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + +static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag &= ~(chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + /* stop controller and re-config current chip*/ -static void bfin_spi_restore_state(struct driver_data *drv_data) +static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; /* Clear status and disable clock */ write_STAT(drv_data, BIT_STAT_CLR); bfin_spi_disable(drv_data); dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); + SSYNC(); + /* 
Load the registers */ write_CTRL(drv_data, chip->ctl_reg); write_BAUD(drv_data, chip->baud); @@ -229,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data) } /* used to kick off transfer in rx mode and read unwanted RX data */ -static inline void bfin_spi_dummy_read(struct driver_data *drv_data) +static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) { (void) read_RDBR(drv_data); } -static void bfin_spi_null_writer(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, tx_val); - drv_data->tx += n_bytes; - /* wait until transfer finished. - checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - /* discard RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_null_reader(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - drv_data->rx += n_bytes; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_u8_writer(struct driver_data *drv_data) +static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -287,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_reader(struct driver_data *drv_data) +static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -320,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_duplex(struct driver_data *drv_data) +static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -351,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - 
bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { + .write = bfin_spi_u8_writer, + .read = bfin_spi_u8_reader, + .duplex = bfin_spi_u8_duplex, +}; -static void bfin_spi_u16_writer(struct driver_data *drv_data) +static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -385,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_reader(struct driver_data *drv_data) +static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -420,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_duplex(struct driver_data *drv_data) +static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -454,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { + .write = bfin_spi_u16_writer, + .read = bfin_spi_u16_reader, + .duplex = bfin_spi_u16_duplex, +}; -/* test if ther is more transfer to be done */ -static void *bfin_spi_next_transfer(struct driver_data *drv_data) +/* test if there is more transfer to be done */ +static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; @@ -493,9 +387,9 @@ static void 
*bfin_spi_next_transfer(struct driver_data *drv_data) * caller already set message->status; * dma and pio irqs are blocked give finished message back */ -static void bfin_spi_giveback(struct driver_data *drv_data) +static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; @@ -524,10 +418,83 @@ static void bfin_spi_giveback(struct driver_data *drv_data) msg->complete(msg->context); } +/* spi data irq handler */ +static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) +{ + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_message *msg = drv_data->cur_msg; + int n_bytes = drv_data->n_bytes; + + /* wait until transfer finished. */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + + if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || + (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { + /* last read */ + if (drv_data->rx) { + dev_dbg(&drv_data->pdev->dev, "last read\n"); + if (n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + drv_data->rx += n_bytes; + } + + msg->actual_length += drv_data->len_in_bytes; + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + /* Move to next transfer */ + msg->state = bfin_spi_next_transfer(drv_data); + + disable_irq_nosync(drv_data->spi_irq); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return IRQ_HANDLED; + } + + if (drv_data->rx && drv_data->tx) { + /* duplex */ + dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); + if (drv_data->n_bytes == 2) { + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + } else if (drv_data->n_bytes == 1) { + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + } else if (drv_data->rx) { + /* read */ + dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); + if (drv_data->n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (drv_data->n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, chip->idle_tx_val); + } else if (drv_data->tx) { + /* write */ + dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); + bfin_spi_dummy_read(drv_data); + if (drv_data->n_bytes == 2) + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + else if (drv_data->n_bytes == 1) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + + if (drv_data->tx) + drv_data->tx += n_bytes; + if (drv_data->rx) + drv_data->rx += n_bytes; + + return IRQ_HANDLED; +} + static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) { - struct driver_data *drv_data = dev_id; - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; unsigned long timeout; unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); @@ -539,10 +506,6 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) clear_dma_irqstat(drv_data->dma_channel); - /* Wait for DMA to complete */ - while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) - cpu_relax(); - /* * wait for the last transaction shifted out. 
HRM states: * at this point there may still be data in the SPI DMA FIFO waiting @@ -550,8 +513,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { - while ((read_STAT(drv_data) & TXS) || - (read_STAT(drv_data) & TXS)) + while ((read_STAT(drv_data) & BIT_STAT_TXS) || + (read_STAT(drv_data) & BIT_STAT_TXS)) cpu_relax(); } @@ -560,14 +523,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dmastat, read_STAT(drv_data)); timeout = jiffies + HZ; - while (!(read_STAT(drv_data) & SPIF)) + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) if (!time_before(jiffies, timeout)) { dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); break; } else cpu_relax(); - if ((dmastat & DMA_ERR) && (spistat & RBSY)) { + if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { msg->state = ERROR_STATE; dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); } else { @@ -587,20 +550,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dev_dbg(&drv_data->pdev->dev, "disable dma channel irq%d\n", drv_data->dma_channel); - dma_disable_irq(drv_data->dma_channel); + dma_disable_irq_nosync(drv_data->dma_channel); return IRQ_HANDLED; } static void bfin_spi_pump_transfers(unsigned long data) { - struct driver_data *drv_data = (struct driver_data *)data; + struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; - struct chip_data *chip = NULL; - u8 width; - u16 cr, dma_width, dma_config; + struct bfin_spi_slave_data *chip = NULL; + unsigned int bits_per_word; + u16 cr, cr_width, dma_width, dma_config; u32 tranf_success = 1; u8 full_duplex = 0; @@ -638,7 +601,7 @@ static void bfin_spi_pump_transfers(unsigned long data) udelay(previous->delay_usecs); } - /* Setup the transfer state based on the type of transfer */ + /* Flush any existing transfers that may be sitting in the hardware */ if (bfin_spi_flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; @@ -678,52 +641,31 @@ static void bfin_spi_pump_transfers(unsigned long data) drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ - switch (transfer->bits_per_word) { - case 8: + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; + if (bits_per_word == 8) { drv_data->n_bytes = 1; - width = CFG_SPI_WORDSIZE8; - drv_data->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: + drv_data->len = transfer->len; + cr_width = 0; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; + } else if (bits_per_word == 16) { drv_data->n_bytes = 2; - width = CFG_SPI_WORDSIZE16; - drv_data->read = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - /* No change, the same as default setting */ - drv_data->n_bytes = chip->n_bytes; - width = chip->width; - drv_data->write = drv_data->tx ? 
chip->write : bfin_spi_null_writer; - drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader; - drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer; - break; - } - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - cr |= (width << 8); - write_CTRL(drv_data, cr); - - if (width == CFG_SPI_WORDSIZE16) { drv_data->len = (transfer->len) >> 1; + cr_width = BIT_CTL_WORDSIZE; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; } else { - drv_data->len = transfer->len; + dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); + message->status = -EINVAL; + bfin_spi_giveback(drv_data); + return; } + cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); + cr |= cr_width; + write_CTRL(drv_data, cr); + dev_dbg(&drv_data->pdev->dev, - "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", - drv_data->write, chip->write, bfin_spi_null_writer); + "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", + drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); - /* speed and width has been set on per message */ message->state = RUNNING_STATE; dma_config = 0; @@ -734,13 +676,11 @@ static void bfin_spi_pump_transfers(unsigned long data) write_BAUD(drv_data, chip->baud); write_STAT(drv_data, BIT_STAT_CLR); - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - if (drv_data->cs_change) - bfin_spi_cs_active(drv_data, chip); + bfin_spi_cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, "now pumping a transfer: width is %d, len is %d\n", - width, transfer->len); + cr_width, transfer->len); /* * Try to map dma buffer and do a dma transfer. If successful use, @@ -759,7 +699,7 @@ static void bfin_spi_pump_transfers(unsigned long data) /* config dma channel */ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); set_dma_x_count(drv_data->dma_channel, drv_data->len); - if (width == CFG_SPI_WORDSIZE16) { + if (cr_width == BIT_CTL_WORDSIZE) { set_dma_x_modify(drv_data->dma_channel, 2); dma_width = WDSIZE_16; } else { @@ -845,73 +785,100 @@ static void bfin_spi_pump_transfers(unsigned long data) dma_enable_irq(drv_data->dma_channel); local_irq_restore(flags); - } else { - /* IO mode write then read */ - dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); - - /* we always use SPI_WRITE mode. SPI_READ mode - seems to have problems with setting up the - output value in TDBR prior to the transfer. */ - write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); - - if (full_duplex) { - /* full duplex mode */ - BUG_ON((drv_data->tx_end - drv_data->tx) != - (drv_data->rx_end - drv_data->rx)); - dev_dbg(&drv_data->pdev->dev, - "IO duplex: cr is 0x%x\n", cr); - - drv_data->duplex(drv_data); + return; + } - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->tx != NULL) { - /* write only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO write: cr is 0x%x\n", cr); + /* + * We always use SPI_WRITE mode (transfer starts with TDBR write). + * SPI_READ mode (transfer starts with RDBR read) seems to have + * problems with setting up the output value in TDBR prior to the + * start of the transfer. 
+ */ + write_CTRL(drv_data, cr | BIT_CTL_TXMOD); - drv_data->write(drv_data); + if (chip->pio_interrupt) { + /* SPI irq should have been disabled by now */ - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->rx != NULL) { - /* read only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO read: cr is 0x%x\n", cr); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); - drv_data->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - tranf_success = 0; + /* start transfer */ + if (drv_data->tx == NULL) + write_TDBR(drv_data, chip->idle_tx_val); + else { + if (bits_per_word == 8) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + else + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + drv_data->tx += drv_data->n_bytes; } - if (!tranf_success) { - dev_dbg(&drv_data->pdev->dev, - "IO write error!\n"); - message->state = ERROR_STATE; - } else { - /* Update total byte transfered */ - message->actual_length += drv_data->len_in_bytes; - /* Move to next transfer of this msg */ - message->state = bfin_spi_next_transfer(drv_data); - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - } - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); + /* once TDBR is empty, interrupt is triggered */ + enable_irq(drv_data->spi_irq); + return; + } + + /* IO mode */ + dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); + + if (full_duplex) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + dev_dbg(&drv_data->pdev->dev, + "IO duplex: cr is 0x%x\n", cr); + + drv_data->ops->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO write: cr is 0x%x\n", cr); + + drv_data->ops->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO read: cr is 0x%x\n", cr); + + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; + } + + if (!tranf_success) { + dev_dbg(&drv_data->pdev->dev, + "IO write error!\n"); + message->state = ERROR_STATE; + } else { + /* Update total byte transfered */ + message->actual_length += drv_data->len_in_bytes; + /* Move to next transfer of this msg */ + message->state = bfin_spi_next_transfer(drv_data); + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void bfin_spi_pump_messages(struct work_struct *work) { - struct driver_data *drv_data; + struct bfin_spi_master_data *drv_data; unsigned long flags; - drv_data = container_of(work, struct driver_data, pump_messages); + drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + if (list_empty(&drv_data->queue) || !drv_data->running) { /* pumper kicked off but no work to do */ drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); @@ -961,12 +928,12 @@ static void bfin_spi_pump_messages(struct work_struct *work) */ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) { - struct driver_data *drv_data = 
spi_master_get_devdata(spi->master); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_STOPPED) { + if (!drv_data->running) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } @@ -978,7 +945,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) dev_dbg(&spi->dev, "adding an msg in transfer() \n"); list_add_tail(&msg->queue, &drv_data->queue); - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + if (drv_data->running && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); @@ -1002,147 +969,184 @@ static u16 ssel[][MAX_SPI_SSEL] = { P_SPI2_SSEL6, P_SPI2_SSEL7}, }; -/* first setup for new devices */ +/* setup for devices (may be called multiple times -- not just first setup) */ static int bfin_spi_setup(struct spi_device *spi) { - struct bfin5xx_spi_chip *chip_info = NULL; - struct chip_data *chip; - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - int ret; - - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) - return -EINVAL; + struct bfin5xx_spi_chip *chip_info; + struct bfin_spi_slave_data *chip = NULL; + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + u16 bfin_ctl_reg; + int ret = -EINVAL; /* Only alloc (or use chip_info) on first setup */ + chip_info = NULL; chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) - return -ENOMEM; + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, "cannot allocate chip data\n"); + ret = -ENOMEM; + goto error; + } chip->enable_dma = 0; chip_info = spi->controller_data; } + /* Let people set non-standard bits directly */ + bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | + BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; + /* chip_info isn't always needed */ if (chip_info) { /* Make sure people stop trying to set fields via ctl_reg * when they should actually be using common SPI framework. - * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. + * Currently we let through: WOM EMISO PSSE GM SZ. * Not sure if a user actually needs/uses any of these, * but let's assume (for now) they do. 
*/ - if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { + if (chip_info->ctl_reg & ~bfin_ctl_reg) { dev_err(&spi->dev, "do not set bits in ctl_reg " "that the SPI framework manages\n"); - return -EINVAL; + goto error; } - chip->enable_dma = chip_info->enable_dma != 0 && drv_data->master_info->enable_dma; chip->ctl_reg = chip_info->ctl_reg; - chip->bits_per_word = chip_info->bits_per_word; - chip->cs_change_per_word = chip_info->cs_change_per_word; chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->cs_gpio = chip_info->cs_gpio; chip->idle_tx_val = chip_info->idle_tx_val; + chip->pio_interrupt = chip_info->pio_interrupt; + spi->bits_per_word = chip_info->bits_per_word; + } else { + /* force a default base state */ + chip->ctl_reg &= bfin_ctl_reg; + } + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + spi->bits_per_word); + goto error; } /* translate common spi framework into our register */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "unsupported spi modes detected\n"); + goto error; + } if (spi->mode & SPI_CPOL) - chip->ctl_reg |= CPOL; + chip->ctl_reg |= BIT_CTL_CPOL; if (spi->mode & SPI_CPHA) - chip->ctl_reg |= CPHA; + chip->ctl_reg |= BIT_CTL_CPHA; if (spi->mode & SPI_LSB_FIRST) - chip->ctl_reg |= LSBF; + chip->ctl_reg |= BIT_CTL_LSBF; /* we dont support running in slave mode (yet?) */ - chip->ctl_reg |= MSTR; + chip->ctl_reg |= BIT_CTL_MASTER; /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + chip->chip_select_num = spi->chip_select; + if (chip->chip_select_num < MAX_CTRL_CS) { + if (!(spi->mode & SPI_CPHA)) + dev_warn(&spi->dev, "Warning: SPI CPHA not set:" + " Slave Select not under software control!\n" + " See Documentation/blackfin/bfin-spi-notes.txt"); + + chip->flag = (1 << spi->chip_select) << 8; + } else + chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; + + if (chip->enable_dma && chip->pio_interrupt) { + dev_err(&spi->dev, "enable_dma is set, " + "do not set pio_interrupt\n"); + goto error; + } + /* * if any one SPI chip is registered and wants DMA, request the * DMA channel for it */ if (chip->enable_dma && !drv_data->dma_requested) { /* register dma irq handler */ - if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { - dev_dbg(&spi->dev, + ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); + if (ret) { + dev_err(&spi->dev, "Unable to request BlackFin SPI DMA channel\n"); - return -ENODEV; + goto error; } - if (set_dma_callback(drv_data->dma_channel, - bfin_spi_dma_irq_handler, drv_data) < 0) { - dev_dbg(&spi->dev, "Unable to set dma callback\n"); - return -EPERM; + drv_data->dma_requested = 1; + + ret = set_dma_callback(drv_data->dma_channel, + bfin_spi_dma_irq_handler, drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to set dma callback\n"); + goto error; } dma_disable_irq(drv_data->dma_channel); - drv_data->dma_requested = 1; } - /* - * Notice: for blackfin, the speed_hz is the value of register - * SPI_BAUD, not the real baudrate - */ - chip->baud = hz_to_spi_baud(spi->max_speed_hz); - chip->flag = 1 << (spi->chip_select); - chip->chip_select_num = spi->chip_select; + if (chip->pio_interrupt && !drv_data->irq_requested) { + ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, + IRQF_DISABLED, "BFIN_SPI", drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to register spi IRQ\n"); + goto error; + } + 
drv_data->irq_requested = 1; + /* we use write mode, spi irq has to be disabled here */ + disable_irq(drv_data->spi_irq); + } - if (chip->chip_select_num == 0) { + if (chip->chip_select_num >= MAX_CTRL_CS) { ret = gpio_request(chip->cs_gpio, spi->modalias); if (ret) { - if (drv_data->dma_requested) - free_dma(drv_data->dma_channel); - return ret; + dev_err(&spi->dev, "gpio_request() error\n"); + goto pin_error; } gpio_direction_output(chip->cs_gpio, 1); } - switch (chip->bits_per_word) { - case 8: - chip->n_bytes = 1; - chip->width = CFG_SPI_WORDSIZE8; - chip->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: - chip->n_bytes = 2; - chip->width = CFG_SPI_WORDSIZE16; - chip->read = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - dev_err(&spi->dev, "%d bits_per_word is not supported\n", - chip->bits_per_word); - if (chip_info) - kfree(chip); - return -ENODEV; - } - dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", - spi->modalias, chip->width, chip->enable_dma); + spi->modalias, spi->bits_per_word, chip->enable_dma); dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", chip->ctl_reg, chip->flag); spi_set_ctldata(spi, chip); dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) - peripheral_request(ssel[spi->master->bus_num] - [chip->chip_select_num-1], spi->modalias); + if (chip->chip_select_num < MAX_CTRL_CS) { + ret = peripheral_request(ssel[spi->master->bus_num] + [chip->chip_select_num-1], spi->modalias); + if (ret) { + dev_err(&spi->dev, "peripheral_request() error\n"); + goto pin_error; + } + } + bfin_spi_cs_enable(drv_data, chip); bfin_spi_cs_deactive(drv_data, chip); return 0; + + pin_error: + if (chip->chip_select_num >= MAX_CTRL_CS) + gpio_free(chip->cs_gpio); + else + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num - 1]); + error: + if (chip) { + if (drv_data->dma_requested) + free_dma(drv_data->dma_channel); + drv_data->dma_requested = 0; + + kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); + } + + return ret; } /* @@ -1151,28 +1155,30 @@ static int bfin_spi_setup(struct spi_device *spi) */ static void bfin_spi_cleanup(struct spi_device *spi) { - struct chip_data *chip = spi_get_ctldata(spi); + struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); if (!chip) return; - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) + if (chip->chip_select_num < MAX_CTRL_CS) { peripheral_free(ssel[spi->master->bus_num] [chip->chip_select_num-1]); - - if (chip->chip_select_num == 0) + bfin_spi_cs_disable(drv_data, chip); + } else gpio_free(chip->cs_gpio); kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); } -static inline int bfin_spi_init_queue(struct driver_data *drv_data) +static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); 
spin_lock_init(&drv_data->lock); - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; drv_data->busy = 0; /* init transfer tasklet */ @@ -1189,18 +1195,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_start_queue(struct driver_data *drv_data) +static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + if (drv_data->running || drv_data->busy) { spin_unlock_irqrestore(&drv_data->lock, flags); return -EBUSY; } - drv_data->run = QUEUE_RUNNING; + drv_data->running = true; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; @@ -1211,7 +1217,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_stop_queue(struct driver_data *drv_data) +static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; @@ -1225,7 +1231,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); @@ -1240,7 +1246,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) return status; } -static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) +static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) { int status; @@ -1258,14 +1264,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct bfin5xx_spi_master *platform_info; struct spi_master *master; - struct driver_data *drv_data = 0; + struct bfin_spi_master_data *drv_data; struct resource *res; int status = 0; platform_info = dev->platform_data; /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + master = spi_alloc_master(dev, sizeof(*drv_data)); if (!master) { dev_err(&pdev->dev, "can not alloc spi_master\n"); return -ENOMEM; @@ -1301,11 +1307,19 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_ioremap; } - drv_data->dma_channel = platform_get_irq(pdev, 0); - if (drv_data->dma_channel < 0) { + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res == NULL) { dev_err(dev, "No DMA channel specified\n"); status = -ENOENT; - goto out_error_no_dma_ch; + goto out_error_free_io; + } + drv_data->dma_channel = res->start; + + drv_data->spi_irq = platform_get_irq(pdev, 0); + if (drv_data->spi_irq < 0) { + dev_err(dev, "No spi pio irq specified\n"); + status = -ENOENT; + goto out_error_free_io; } /* Initial and start queue */ @@ -1327,6 +1341,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_queue_alloc; } + /* Reset SPI registers. If these registers were used by the boot loader, + * the sky may fall on your head if you enable the dma controller. 
+ */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); + /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); @@ -1342,7 +1362,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev) out_error_queue_alloc: bfin_spi_destroy_queue(drv_data); -out_error_no_dma_ch: +out_error_free_io: iounmap((void *) drv_data->regs_base); out_error_ioremap: out_error_get_res: @@ -1354,7 +1374,7 @@ out_error_get_res: /* stop hardware and remove the driver */ static int __devexit bfin_spi_remove(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; if (!drv_data) @@ -1374,6 +1394,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) free_dma(drv_data->dma_channel); } + if (drv_data->irq_requested) { + free_irq(drv_data->spi_irq, drv_data); + drv_data->irq_requested = 0; + } + /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); @@ -1388,26 +1413,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; status = bfin_spi_stop_queue(drv_data); if (status != 0) return status; - /* stop hardware */ - bfin_spi_disable(drv_data); + drv_data->ctrl_reg = read_CTRL(drv_data); + drv_data->flag_reg = read_FLAG(drv_data); + + /* + * reset SPI_CTL and SPI_FLG registers + */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); return 0; } static int bfin_spi_resume(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; - /* Enable the SPI interface */ - bfin_spi_enable(drv_data); + write_CTRL(drv_data, drv_data->ctrl_reg); + write_FLAG(drv_data, drv_data->flag_reg); /* Start the queue running */ status = bfin_spi_start_queue(drv_data); @@ -1438,7 +1469,7 @@ static int __init bfin_spi_init(void) { return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); } -module_init(bfin_spi_init); +subsys_initcall(bfin_spi_init); static void __exit bfin_spi_exit(void) { diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index f1db395dd88..8b55724d5f3 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -23,6 +23,7 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -258,7 +259,6 @@ static void bitbang_work(struct work_struct *work) struct spi_bitbang *bitbang = container_of(work, struct spi_bitbang, work); unsigned long flags; - int do_setup = -1; int (*setup_transfer)(struct spi_device *, struct spi_transfer *); @@ -274,6 +274,7 @@ static void bitbang_work(struct work_struct *work) unsigned tmp; unsigned cs_change; int status; + int do_setup = -1; m = container_of(bitbang->queue.next, struct spi_message, queue); @@ -306,6 +307,8 @@ static void bitbang_work(struct work_struct *work) status = setup_transfer(spi, t); if (status < 0) break; + if (do_setup == -1) + do_setup = 0; } /* set up default clock polarity, and activate chip; @@ -366,11 +369,6 @@ static void bitbang_work(struct 
work_struct *work) m->status = status; m->complete(m->context); - /* restore speed and wordsize if it was overridden */ - if (do_setup == 1) - setup_transfer(spi, NULL); - do_setup = 0; - /* normally deactivate chipselect ... unless no error and * cs_change has hinted that the next message will probably * be for this chip too. diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h new file mode 100644 index 00000000000..c16bf853c3e --- /dev/null +++ b/drivers/spi/spi_bitbang_txrx.h @@ -0,0 +1,97 @@ +/* + * Mix this utility code with some glue code to get one of several types of + * simple SPI master driver. Two do polled word-at-a-time I/O: + * + * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](), + * expanding the per-word routines from the inline templates below. + * + * - Drivers for controllers resembling bare shift registers. Provide + * chipselect() and txrx_word[](), with custom setup()/cleanup() methods + * that use your controller's clock and chipselect registers. + * + * Some hardware works well with requests at spi_transfer scope: + * + * - Drivers leveraging smarter hardware, with fifos or DMA; or for half + * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), + * and custom setup()/cleanup() methods. + */ + +/* + * The code that knows what GPIO pins do what should have declared four + * functions, ideally as inlines, before including this header: + * + * void setsck(struct spi_device *, int is_on); + * void setmosi(struct spi_device *, int is_on); + * int getmiso(struct spi_device *); + * void spidelay(unsigned); + * + * setsck()'s is_on parameter is a zero/nonzero boolean. + * + * setmosi()'s is_on parameter is a zero/nonzero boolean. + * + * getmiso() is required to return 0 or 1 only. Any other value is invalid + * and will result in improper operation. + * + * A non-inlined routine would call bitbang_txrx_*() routines. The + * main loop could easily compile down to a handful of instructions, + * especially if the delay is a NOP (to run at peak speed). + * + * Since this is software, the timings may not be exactly what your board's + * chips need ... there may be several reasons you'd need to tweak timings + * in these routines, not just make to make it faster or slower to match a + * particular CPU clock rate. 
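/*
 * Editor's sketch (hypothetical glue, not part of the patch): the header
 * comment above describes the four inlines a user of spi_bitbang_txrx.h
 * must provide before including it.  A minimal GPIO bitbanger along the
 * lines of spi_butterfly.c could look like this; the pin numbers and the
 * delay choice are placeholders.
 */
#define EXAMPLE_GPIO_SCK	10	/* placeholder pin numbers */
#define EXAMPLE_GPIO_MOSI	11
#define EXAMPLE_GPIO_MISO	12

static inline void setsck(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_SCK, is_on);
}

static inline void setmosi(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_MOSI, is_on);
}

static inline int getmiso(struct spi_device *spi)
{
	return !!gpio_get_value(EXAMPLE_GPIO_MISO);	/* must return 0 or 1 */
}

#define spidelay(ns)	ndelay(ns)

#include "spi_bitbang_txrx.h"

/* word-at-a-time helper for SPI_MODE_0, as spi_butterfly.c does below */
static u32 example_txrx_word_mode0(struct spi_device *spi,
				   unsigned nsecs, u32 word, u8 bits)
{
	return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}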
+ */ + +static inline u32 +bitbang_txrx_be_cpha0(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on trailing edge */ + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, !cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on leading edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + setsck(spi, cpol); + } + return word; +} + +static inline u32 +bitbang_txrx_be_cpha1(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on leading edge */ + setsck(spi, !cpol); + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on trailing edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + } + return word; +} diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index c2184866fa9..0d4ceba3b59 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c @@ -149,15 +149,14 @@ static void butterfly_chipselect(struct spi_device *spi, int value) #define spidelay(X) do{}while(0) //#define spidelay ndelay -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } /*----------------------------------------------------------------------*/ diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c new file mode 100644 index 00000000000..e3b4f645196 --- /dev/null +++ b/drivers/spi/spi_fsl_espi.c @@ -0,0 +1,748 @@ +/* + * Freescale eSPI controller driver. + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> +#include <linux/mm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +/* eSPI Controller registers */ +struct fsl_espi_reg { + __be32 mode; /* 0x000 - eSPI mode register */ + __be32 event; /* 0x004 - eSPI event register */ + __be32 mask; /* 0x008 - eSPI mask register */ + __be32 command; /* 0x00c - eSPI command register */ + __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ + __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ + u8 res[8]; /* 0x018 - 0x01c reserved */ + __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ +}; + +struct fsl_espi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned len; + unsigned n_tx; + unsigned n_rx; + unsigned actual_length; + int status; +}; + +/* eSPI Controller mode register definitions */ +#define SPMODE_ENABLE (1 << 31) +#define SPMODE_LOOP (1 << 30) +#define SPMODE_TXTHR(x) ((x) << 8) +#define SPMODE_RXTHR(x) ((x) << 0) + +/* eSPI Controller CS mode register definitions */ +#define CSMODE_CI_INACTIVEHIGH (1 << 31) +#define CSMODE_CP_BEGIN_EDGECLK (1 << 30) +#define CSMODE_REV (1 << 29) +#define CSMODE_DIV16 (1 << 28) +#define CSMODE_PM(x) ((x) << 24) +#define CSMODE_POL_1 (1 << 20) +#define CSMODE_LEN(x) ((x) << 16) +#define CSMODE_BEF(x) ((x) << 12) +#define CSMODE_AFT(x) ((x) << 8) +#define CSMODE_CG(x) ((x) << 3) + +/* Default mode/csmode for eSPI controller */ +#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) +#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ + | CSMODE_AFT(0) | CSMODE_CG(1)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ +#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) +#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) + +/* SPCOM register values */ +#define SPCOM_CS(x) ((x) << 30) +#define SPCOM_TRANLEN(x) ((x) << 0) +#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ + +static void fsl_espi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_espi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; + __be32 __iomem *espi_mode = ®_base->mode; + u32 tmp; + unsigned long flags; + + /* Turn off IRQs locally to minimize time that SPI is disabled. 
*/ + local_irq_save(flags); + + /* Turn off SPI unit prior changing mode */ + tmp = mpc8xxx_spi_read_reg(espi_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); + mpc8xxx_spi_write_reg(mode, cs->hw_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp); + + local_irq_restore(flags); +} + +static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) +{ + u32 data; + u16 data_h; + u16 data_l; + const u32 *tx = mpc8xxx_spi->tx; + + if (!tx) + return 0; + + data = *tx++ << mpc8xxx_spi->tx_shift; + data_l = data & 0xffff; + data_h = (data >> 16) & 0xffff; + swab16s(&data_l); + swab16s(&data_h); + data = data_h | data_l; + + mpc8xxx_spi->tx = tx; + return data; +} + +static int fsl_espi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16] */ + if ((bits_per_word < 4) || (bits_per_word > 16)) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + cs->rx_shift = 0; + cs->tx_shift = 0; + cs->get_rx = mpc8xxx_spi_rx_buf_u32; + cs->get_tx = mpc8xxx_spi_tx_buf_u32; + if (bits_per_word <= 8) { + cs->rx_shift = 8 - bits_per_word; + } else if (bits_per_word <= 16) { + cs->rx_shift = 16 - bits_per_word; + if (spi->mode & SPI_LSB_FIRST) + cs->get_tx = fsl_espi_tx_buf_lsb; + } else { + return -EINVAL; + } + + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + bits_per_word = bits_per_word - 1; + + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); + + cs->hw_mode |= CSMODE_LEN(bits_per_word); + + if ((mpc8xxx_spi->spibrg / hz) > 64) { + cs->hw_mode |= CSMODE_DIV16; + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; + + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. 
" + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= CSMODE_PM(pm); + + fsl_espi_change_mode(spi); + return 0; +} + +static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, + unsigned int len) +{ + u32 word; + struct fsl_espi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + mpc8xxx_spi->len = t->len; + len = roundup(len, 4) / 4; + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ + if ((t->len - 1) > SPCOM_TRANLEN_MAX) { + dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" + " beyond the SPCOM[TRANLEN] field\n", t->len); + return -EINVAL; + } + mpc8xxx_spi_write_reg(®_base->command, + (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); + + ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + return mpc8xxx_spi->count; +} + +static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) +{ + if (cmd[1] && cmd[2] && cmd[3]) { + cmd[1] = (u8)(addr >> 16); + cmd[2] = (u8)(addr >> 8); + cmd[3] = (u8)(addr >> 0); + } +} + +static unsigned int fsl_espi_cmd2addr(u8 *cmd) +{ + if (cmd[1] && cmd[2] && cmd[3]) + return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; + + return 0; +} + +static void fsl_espi_do_trans(struct spi_message *m, + struct fsl_espi_transfer *tr) +{ + struct spi_device *spi = m->spi; + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct fsl_espi_transfer *espi_trans = tr; + struct spi_message message; + struct spi_transfer *t, *first, trans; + int status = 0; + + spi_message_init(&message); + memset(&trans, 0, sizeof(trans)); + + first = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { + if ((first->bits_per_word != t->bits_per_word) || + (first->speed_hz != t->speed_hz)) { + espi_trans->status = -EINVAL; + dev_err(mspi->dev, "bits_per_word/speed_hz should be" + " same for the same SPI transfer\n"); + return; + } + + trans.speed_hz = t->speed_hz; + trans.bits_per_word = t->bits_per_word; + trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); + } + + trans.len = espi_trans->len; + trans.tx_buf = espi_trans->tx_buf; + trans.rx_buf = espi_trans->rx_buf; + spi_message_add_tail(&trans, &message); + + list_for_each_entry(t, &message.transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = -EINVAL; + + status = fsl_espi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (t->len) + status = fsl_espi_bufs(spi, t); + + if (status) { + status = -EMSGSIZE; + break; + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + } + + espi_trans->status = status; + 
fsl_espi_setup_transfer(spi, NULL); +} + +static void fsl_espi_cmd_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct spi_transfer *t; + u8 *local_buf; + int i = 0; + struct fsl_espi_transfer *espi_trans = trans; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + espi_trans->n_tx; + fsl_espi_do_trans(m, espi_trans); + + espi_trans->actual_length = espi_trans->len; + kfree(local_buf); +} + +static void fsl_espi_rw_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct fsl_espi_transfer *espi_trans = trans; + unsigned int n_tx = espi_trans->n_tx; + unsigned int n_rx = espi_trans->n_rx; + struct spi_transfer *t; + u8 *local_buf; + u8 *rx_buf = rx_buff; + unsigned int trans_len; + unsigned int addr; + int i, pos, loop; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { + trans_len = n_rx - pos; + if (trans_len > SPCOM_TRANLEN_MAX - n_tx) + trans_len = SPCOM_TRANLEN_MAX - n_tx; + + i = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + addr = fsl_espi_cmd2addr(local_buf); + addr += pos; + fsl_espi_addr2cmd(addr, local_buf); + + espi_trans->n_tx = n_tx; + espi_trans->n_rx = trans_len; + espi_trans->len = trans_len + n_tx; + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + n_tx; + fsl_espi_do_trans(m, espi_trans); + + memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); + + if (loop > 0) + espi_trans->actual_length += espi_trans->len - n_tx; + else + espi_trans->actual_length += espi_trans->len; + } + + kfree(local_buf); +} + +static void fsl_espi_do_one_msg(struct spi_message *m) +{ + struct spi_transfer *t; + u8 *rx_buf = NULL; + unsigned int n_tx = 0; + unsigned int n_rx = 0; + struct fsl_espi_transfer espi_trans; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) + n_tx += t->len; + if (t->rx_buf) { + n_rx += t->len; + rx_buf = t->rx_buf; + } + } + + espi_trans.n_tx = n_tx; + espi_trans.n_rx = n_rx; + espi_trans.len = n_tx + n_rx; + espi_trans.actual_length = 0; + espi_trans.status = 0; + + if (!rx_buf) + fsl_espi_cmd_trans(m, &espi_trans, NULL); + else + fsl_espi_rw_trans(m, &espi_trans, rx_buf); + + m->actual_length = espi_trans.actual_length; + m->status = espi_trans.status; + m->complete(m->context); +} + +static int fsl_espi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + int retval; + u32 hw_mode; + u32 loop_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save orginal settings */ + cs->hw_mode = mpc8xxx_spi_read_reg( + ®_base->csmode[spi->chip_select]); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH + | CSMODE_REV); + + if (spi->mode & SPI_CPHA) 
+ cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; + if (!(spi->mode & SPI_LSB_FIRST)) + cs->hw_mode |= CSMODE_REV; + + /* Handle the loop mode */ + loop_mode = mpc8xxx_spi_read_reg(®_base->mode); + loop_mode &= ~SPMODE_LOOP; + if (spi->mode & SPI_LOOP) + loop_mode |= SPMODE_LOOP; + mpc8xxx_spi_write_reg(®_base->mode, loop_mode); + + retval = fsl_espi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_espi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data; + + /* Spin until RX is done */ + while (SPIE_RXCNT(events) < min(4, mspi->len)) { + cpu_relax(); + events = mpc8xxx_spi_read_reg(®_base->event); + } + mspi->len -= 4; + + rx_data = mpc8xxx_spi_read_reg(®_base->receive); + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if (!(events & SPIE_NF)) { + int ret; + + /* spin until TX is done */ + ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( + ®_base->event)) & SPIE_NF) == 0, 1000, 0); + if (!ret) { + dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); + return; + } + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + struct fsl_espi_reg *reg_base = mspi->reg_base; + irqreturn_t ret = IRQ_NONE; + u32 events; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); + + fsl_espi_cpu_irq(mspi, events); + + return ret; +} + +static void fsl_espi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); +} + +static struct spi_master * __devinit fsl_espi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + u32 regval; + int i, ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (!master) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_espi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_espi_remove; + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (!mpc8xxx_spi->reg_base) { + ret = -ENOMEM; + goto err_probe; + } + + reg_base = mpc8xxx_spi->reg_base; + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, + 0, "fsl_espi", mpc8xxx_spi); + if (ret) + goto free_irq; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Init eSPI CS mode register */ + for (i = 0; i < pdata->max_chipselect; i++) + mpc8xxx_spi_write_reg(®_base->csmode[i], 
CSMODE_INIT_VAL); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static int of_fsl_espi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + const u32 *prop; + int len; + + prop = of_get_property(np, "fsl,espi-num-chipselects", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); + return -EINVAL; + } + + pdata->max_chipselect = *prop; + pdata->cs_control = NULL; + + return 0; +} + +static int __devinit of_fsl_espi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; + + ret = of_fsl_espi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_espi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + return ret; +} + +static int __devexit of_fsl_espi_remove(struct platform_device *dev) +{ + return mpc8xxx_spi_remove(&dev->dev); +} + +static const struct of_device_id of_fsl_espi_match[] = { + { .compatible = "fsl,mpc8536-espi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_espi_match); + +static struct of_platform_driver fsl_espi_driver = { + .driver = { + .name = "fsl_espi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_espi_match, + }, + .probe = of_fsl_espi_probe, + .remove = __devexit_p(of_fsl_espi_remove), +}; + +static int __init fsl_espi_init(void) +{ + return of_register_platform_driver(&fsl_espi_driver); +} +module_init(fsl_espi_init); + +static void __exit fsl_espi_exit(void) +{ + of_unregister_platform_driver(&fsl_espi_driver); +} +module_exit(fsl_espi_exit); + +MODULE_AUTHOR("Mingkai Hu"); +MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c new file mode 100644 index 00000000000..5cd741fdb5c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.c @@ -0,0 +1,237 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/fsl_devices.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +#define MPC8XXX_SPI_RX_BUF(type) \ +void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + type *rx = mpc8xxx_spi->rx; \ + *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ + mpc8xxx_spi->rx = rx; \ +} + +#define MPC8XXX_SPI_TX_BUF(type) \ +u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + u32 data; \ + const type *tx = mpc8xxx_spi->tx; \ + if (!tx) \ + return 0; \ + data = *tx++ << mpc8xxx_spi->tx_shift; \ + mpc8xxx_spi->tx = tx; \ + return data; \ +} + +MPC8XXX_SPI_RX_BUF(u8) +MPC8XXX_SPI_RX_BUF(u16) +MPC8XXX_SPI_RX_BUF(u32) +MPC8XXX_SPI_TX_BUF(u8) +MPC8XXX_SPI_TX_BUF(u16) +MPC8XXX_SPI_TX_BUF(u32) + +struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) +{ + return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); +} + +void mpc8xxx_spi_work(struct work_struct *work) +{ + struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, + work); + + spin_lock_irq(&mpc8xxx_spi->lock); + while (!list_empty(&mpc8xxx_spi->queue)) { + struct spi_message *m = container_of(mpc8xxx_spi->queue.next, + struct spi_message, queue); + + list_del_init(&m->queue); + spin_unlock_irq(&mpc8xxx_spi->lock); + + if (mpc8xxx_spi->spi_do_one_msg) + mpc8xxx_spi->spi_do_one_msg(m); + + spin_lock_irq(&mpc8xxx_spi->lock); + } + spin_unlock_irq(&mpc8xxx_spi->lock); +} + +int mpc8xxx_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mpc8xxx_spi->lock, flags); + list_add_tail(&m->queue, &mpc8xxx_spi->queue); + queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); + spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); + + return 0; +} + +void mpc8xxx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +const char *mpc8xxx_spi_strmode(unsigned int flags) +{ + if (flags & SPI_QE_CPU_MODE) { + return "QE CPU"; + } else if (flags & SPI_CPM_MODE) { + if (flags & SPI_QE) + return "QE"; + else if (flags & SPI_CPM2) + return "CPM2"; + else + return "CPM1"; + } + return "CPU"; +} + +int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + int ret = 0; + + master = dev_get_drvdata(dev); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH + | SPI_LSB_FIRST | SPI_LOOP; + + master->transfer = mpc8xxx_spi_transfer; + master->cleanup = mpc8xxx_spi_cleanup; + master->dev.of_node = dev->of_node; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->dev = dev; + mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; + mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; + mpc8xxx_spi->flags = pdata->flags; + mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->irq = irq; + + mpc8xxx_spi->rx_shift = 0; + mpc8xxx_spi->tx_shift = 0; + + init_completion(&mpc8xxx_spi->done); + + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + + spin_lock_init(&mpc8xxx_spi->lock); + init_completion(&mpc8xxx_spi->done); + INIT_WORK(&mpc8xxx_spi->work, 
mpc8xxx_spi_work); + INIT_LIST_HEAD(&mpc8xxx_spi->queue); + + mpc8xxx_spi->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (mpc8xxx_spi->workqueue == NULL) { + ret = -EBUSY; + goto err; + } + + return 0; + +err: + return ret; +} + +int __devexit mpc8xxx_spi_remove(struct device *dev) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct spi_master *master; + + master = dev_get_drvdata(dev); + mpc8xxx_spi = spi_master_get_devdata(master); + + flush_workqueue(mpc8xxx_spi->workqueue); + destroy_workqueue(mpc8xxx_spi->workqueue); + spi_unregister_master(master); + + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); + + if (mpc8xxx_spi->spi_remove) + mpc8xxx_spi->spi_remove(mpc8xxx_spi); + + return 0; +} + +int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct mpc8xxx_spi_probe_info *pinfo; + struct fsl_spi_platform_data *pdata; + const void *prop; + int ret = -ENOMEM; + + pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pdata = &pinfo->pdata; + dev->platform_data = pdata; + + /* Allocate bus num dynamically. */ + pdata->bus_num = -1; + + /* SPI controller is either clocked from QE or SoC clock. */ + pdata->sysclk = get_brgfreq(); + if (pdata->sysclk == -1) { + pdata->sysclk = fsl_get_sys_freq(); + if (pdata->sysclk == -1) { + ret = -ENODEV; + goto err; + } + } + + prop = of_get_property(np, "mode", NULL); + if (prop && !strcmp(prop, "cpu-qe")) + pdata->flags = SPI_QE_CPU_MODE; + else if (prop && !strcmp(prop, "qe")) + pdata->flags = SPI_CPM_MODE | SPI_QE; + else if (of_device_is_compatible(np, "fsl,cpm2-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM2; + else if (of_device_is_compatible(np, "fsl,cpm1-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM1; + + return 0; + +err: + kfree(pinfo); + return ret; +} diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h new file mode 100644 index 00000000000..281e060977c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.h @@ -0,0 +1,124 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright 2010 Freescale Semiconductor, Inc. + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __SPI_FSL_LIB_H__ +#define __SPI_FSL_LIB_H__ + +#include <asm/io.h> + +/* SPI/eSPI Controller driver's private data. 
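/*
 * Editor's note (illustration only): the MPC8XXX_SPI_RX_BUF()/_TX_BUF()
 * macros in spi_fsl_lib.c above generate the u8/u16/u32 accessors that the
 * library exports for the per-word CPU-mode paths.  For instance,
 * MPC8XXX_SPI_RX_BUF(u8) expands to roughly:
 */
void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi)
{
	u8 *rx = mpc8xxx_spi->rx;

	*rx++ = (u8)(data >> mpc8xxx_spi->rx_shift);
	mpc8xxx_spi->rx = rx;
}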
*/ +struct mpc8xxx_spi { + struct device *dev; + void *reg_base; + + /* rx & tx bufs from the spi_transfer */ + const void *tx; + void *rx; +#ifdef CONFIG_SPI_FSL_ESPI + int len; +#endif + + int subblock; + struct spi_pram __iomem *pram; + struct cpm_buf_desc __iomem *tx_bd; + struct cpm_buf_desc __iomem *rx_bd; + + struct spi_transfer *xfer_in_progress; + + /* dma addresses for CPM transfers */ + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool map_tx_dma; + bool map_rx_dma; + + dma_addr_t dma_dummy_tx; + dma_addr_t dma_dummy_rx; + + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32(*get_tx) (struct mpc8xxx_spi *); + + /* hooks for different controller driver */ + void (*spi_do_one_msg) (struct spi_message *m); + void (*spi_remove) (struct mpc8xxx_spi *mspi); + + unsigned int count; + unsigned int irq; + + unsigned nsecs; /* (clock cycle time)/2 */ + + u32 spibrg; /* SPIBRG input clock */ + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + + unsigned int flags; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; + + struct completion done; +}; + +struct spi_mpc8xxx_cs { + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32 (*get_tx) (struct mpc8xxx_spi *); + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + u32 hw_mode; /* Holds HW mode register settings */ +}; + +static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) +{ + out_be32(reg, val); +} + +static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) +{ + return in_be32(reg); +} + +struct mpc8xxx_spi_probe_info { + struct fsl_spi_platform_data pdata; + int *gpios; + bool *alow_flags; +}; + +extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); + +extern struct mpc8xxx_spi_probe_info *to_of_pinfo( + struct fsl_spi_platform_data *pdata); +extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len); +extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); +extern void mpc8xxx_spi_cleanup(struct spi_device *spi); +extern const char *mpc8xxx_spi_strmode(unsigned int flags); +extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq); +extern int mpc8xxx_spi_remove(struct device *dev); +extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid); + +#endif /* __SPI_FSL_LIB_H__ */ diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_fsl_spi.c index 1fb2a6ea328..7ca52d3ae8f 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_fsl_spi.c @@ -1,9 +1,10 @@ /* - * MPC8xxx SPI controller driver. + * Freescale SPI controller driver. * * Maintainer: Kumar Gala * * Copyright (C) 2006 Polycom, Inc. + * Copyright 2010 Freescale Semiconductor, Inc. * * CPM SPI and QE buffer descriptors mode support: * Copyright (c) 2009 MontaVista Software, Inc. @@ -15,18 +16,11 @@ * option) any later version. 
*/ #include <linux/module.h> -#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/bug.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/completion.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/irq.h> -#include <linux/device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/platform_device.h> @@ -38,12 +32,12 @@ #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <linux/of_spi.h> #include <sysdev/fsl_soc.h> #include <asm/cpm.h> #include <asm/qe.h> -#include <asm/irq.h> + +#include "spi_fsl_lib.h" /* CPM1 and CPM2 are mutually exclusive. */ #ifdef CONFIG_CPM1 @@ -55,7 +49,7 @@ #endif /* SPI Controller registers */ -struct mpc8xxx_spi_reg { +struct fsl_spi_reg { u8 res1[0x20]; __be32 mode; __be32 event; @@ -65,28 +59,6 @@ struct mpc8xxx_spi_reg { __be32 receive; }; -/* SPI Parameter RAM */ -struct spi_pram { - __be16 rbase; /* Rx Buffer descriptor base address */ - __be16 tbase; /* Tx Buffer descriptor base address */ - u8 rfcr; /* Rx function code */ - u8 tfcr; /* Tx function code */ - __be16 mrblr; /* Max receive buffer length */ - __be32 rstate; /* Internal */ - __be32 rdp; /* Internal */ - __be16 rbptr; /* Internal */ - __be16 rbc; /* Internal */ - __be32 rxtmp; /* Internal */ - __be32 tstate; /* Internal */ - __be32 tdp; /* Internal */ - __be16 tbptr; /* Internal */ - __be16 tbc; /* Internal */ - __be32 txtmp; /* Internal */ - __be32 res; /* Tx temp. */ - __be16 rpbase; /* Relocation pointer (CPM1 only) */ - __be16 res1; /* Reserved */ -}; - /* SPI Controller mode register definitions */ #define SPMODE_LOOP (1 << 30) #define SPMODE_CI_INACTIVEHIGH (1 << 29) @@ -102,7 +74,7 @@ struct spi_pram { /* * Default for SPI Mode: - * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk */ #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) @@ -124,112 +96,16 @@ struct spi_pram { #define SPI_PRAM_SIZE 0x100 #define SPI_MRBLR ((unsigned int)PAGE_SIZE) -/* SPI Controller driver's private data. 
*/ -struct mpc8xxx_spi { - struct device *dev; - struct mpc8xxx_spi_reg __iomem *base; - - /* rx & tx bufs from the spi_transfer */ - const void *tx; - void *rx; - - int subblock; - struct spi_pram __iomem *pram; - struct cpm_buf_desc __iomem *tx_bd; - struct cpm_buf_desc __iomem *rx_bd; - - struct spi_transfer *xfer_in_progress; - - /* dma addresses for CPM transfers */ - dma_addr_t tx_dma; - dma_addr_t rx_dma; - bool map_tx_dma; - bool map_rx_dma; - - dma_addr_t dma_dummy_tx; - dma_addr_t dma_dummy_rx; - - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32(*get_tx) (struct mpc8xxx_spi *); - - unsigned int count; - unsigned int irq; - - unsigned nsecs; /* (clock cycle time)/2 */ +static void *fsl_dummy_rx; +static DEFINE_MUTEX(fsl_dummy_rx_lock); +static int fsl_dummy_rx_refcnt; - u32 spibrg; /* SPIBRG input clock */ - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - - unsigned int flags; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; - - struct completion done; -}; - -static void *mpc8xxx_dummy_rx; -static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock); -static int mpc8xxx_dummy_rx_refcnt; - -struct spi_mpc8xxx_cs { - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32 (*get_tx) (struct mpc8xxx_spi *); - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - u32 hw_mode; /* Holds HW mode register settings */ -}; - -static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) -{ - out_be32(reg, val); -} - -static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) -{ - return in_be32(reg); -} - -#define MPC83XX_SPI_RX_BUF(type) \ -static \ -void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - type *rx = mpc8xxx_spi->rx; \ - *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ - mpc8xxx_spi->rx = rx; \ -} - -#define MPC83XX_SPI_TX_BUF(type) \ -static \ -u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - u32 data; \ - const type *tx = mpc8xxx_spi->tx; \ - if (!tx) \ - return 0; \ - data = *tx++ << mpc8xxx_spi->tx_shift; \ - mpc8xxx_spi->tx = tx; \ - return data; \ -} - -MPC83XX_SPI_RX_BUF(u8) -MPC83XX_SPI_RX_BUF(u16) -MPC83XX_SPI_RX_BUF(u32) -MPC83XX_SPI_TX_BUF(u8) -MPC83XX_SPI_TX_BUF(u16) -MPC83XX_SPI_TX_BUF(u32) - -static void mpc8xxx_spi_change_mode(struct spi_device *spi) +static void fsl_spi_change_mode(struct spi_device *spi) { struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct spi_mpc8xxx_cs *cs = spi->controller_state; - __be32 __iomem *mode = &mspi->base->mode; + struct fsl_spi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->mode; unsigned long flags; if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) @@ -240,7 +116,6 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi) /* Turn off SPI unit prior changing mode */ mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); - mpc8xxx_spi_write_reg(mode, cs->hw_mode); /* When in CPM mode, we need to reinit tx and rx. 
*/ if (mspi->flags & SPI_CPM_MODE) { @@ -257,11 +132,11 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi) } } } - + mpc8xxx_spi_write_reg(mode, cs->hw_mode); local_irq_restore(flags); } -static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) +static void fsl_spi_chipselect(struct spi_device *spi, int value) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; @@ -279,43 +154,18 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; - mpc8xxx_spi_change_mode(spi); + fsl_spi_change_mode(spi); if (pdata->cs_control) pdata->cs_control(spi, pol); } } -static -int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + struct mpc8xxx_spi *mpc8xxx_spi, + int bits_per_word) { - struct mpc8xxx_spi *mpc8xxx_spi; - u8 bits_per_word, pm; - u32 hz; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } else { - bits_per_word = 0; - hz = 0; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - - /* Make sure its a bit width we support [4..16, 32] */ - if ((bits_per_word < 4) - || ((bits_per_word > 16) && (bits_per_word != 32))) - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - cs->rx_shift = 0; cs->tx_shift = 0; if (bits_per_word <= 8) { @@ -339,19 +189,78 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) return -EINVAL; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && - spi->mode & SPI_LSB_FIRST) { + spi->mode & SPI_LSB_FIRST) { cs->tx_shift = 0; if (bits_per_word <= 8) cs->rx_shift = 8; else cs->rx_shift = 0; } - mpc8xxx_spi->rx_shift = cs->rx_shift; mpc8xxx_spi->tx_shift = cs->tx_shift; mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; + return bits_per_word; +} + +static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + int bits_per_word) +{ + /* QE uses Little Endian for words > 8 + * so transform all words > 8 into 8 bits + * Unfortnatly that doesn't work for LSB so + * reject these for now */ + /* Note: 32 bits word, LSB works iff + * tfcr/rfcr is set to CPMFCR_GBL */ + if (spi->mode & SPI_LSB_FIRST && + bits_per_word > 8) + return -EINVAL; + if (bits_per_word > 8) + return 8; /* pretend its 8 bits */ + return bits_per_word; +} + +static int fsl_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16, 32] */ + if ((bits_per_word < 4) + || ((bits_per_word > 16) && (bits_per_word != 32))) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) + bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, + mpc8xxx_spi, + bits_per_word); + else if (mpc8xxx_spi->flags & SPI_QE) + bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, + 
bits_per_word); + + if (bits_per_word < 0) + return bits_per_word; + if (bits_per_word == 32) bits_per_word = 0; else @@ -365,50 +274,59 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) if ((mpc8xxx_spi->spibrg / hz) > 64) { cs->hw_mode |= SPMODE_DIV16; - pm = mpc8xxx_spi->spibrg / (hz * 64); + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " "Will use %d Hz instead.\n", dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / 1024); if (pm > 16) pm = 16; - } else - pm = mpc8xxx_spi->spibrg / (hz * 4); + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } if (pm) pm--; cs->hw_mode |= SPMODE_PM(pm); - mpc8xxx_spi_change_mode(spi); + fsl_spi_change_mode(spi); return 0; } -static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) { struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; unsigned int xfer_len = min(mspi->count, SPI_MRBLR); unsigned int xfer_ofs; + struct fsl_spi_reg *reg_base = mspi->reg_base; xfer_ofs = mspi->xfer_in_progress->len - mspi->count; - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); + if (mspi->rx_dma == mspi->dma_dummy_rx) + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); + else + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); out_be16(&rx_bd->cbd_datlen, 0); out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); + if (mspi->tx_dma == mspi->dma_dummy_tx) + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); + else + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); out_be16(&tx_bd->cbd_datlen, xfer_len); out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | BD_SC_LAST); /* start transfer */ - mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR); + mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); } -static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, +static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, bool is_dma_mapped) { struct device *dev = mspi->dev; + struct fsl_spi_reg *reg_base = mspi->reg_base; if (is_dma_mapped) { mspi->map_tx_dma = 0; @@ -437,7 +355,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, dev_err(dev, "unable to map tx dma\n"); return -ENOMEM; } - } else { + } else if (t->tx_buf) { mspi->tx_dma = t->tx_dma; } @@ -448,18 +366,18 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, dev_err(dev, "unable to map rx dma\n"); goto err_rx_dma; } - } else { + } else if (t->rx_buf) { mspi->rx_dma = t->rx_dma; } /* enable rx ints */ - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB); + mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); mspi->xfer_in_progress = t; mspi->count = t->len; /* start CPM transfers */ - mpc8xxx_spi_cpm_bufs_start(mspi); + fsl_spi_cpm_bufs_start(mspi); return 0; @@ -469,43 +387,46 @@ err_rx_dma: return -ENOMEM; } -static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; struct spi_transfer *t = mspi->xfer_in_progress; if (mspi->map_tx_dma) dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); - if (mspi->map_tx_dma) + if (mspi->map_rx_dma) dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); mspi->xfer_in_progress = NULL; } -static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi, +static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer 
*t, unsigned int len) { u32 word; + struct fsl_spi_reg *reg_base = mspi->reg_base; mspi->count = len; /* enable rx ints */ - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE); + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); /* transmit word */ word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); + mpc8xxx_spi_write_reg(®_base->transmit, word); return 0; } -static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, +static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, bool is_dma_mapped) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_reg *reg_base; unsigned int len = t->len; u8 bits_per_word; int ret; + reg_base = mpc8xxx_spi->reg_base; bits_per_word = spi->bits_per_word; if (t->bits_per_word) bits_per_word = t->bits_per_word; @@ -529,24 +450,24 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, INIT_COMPLETION(mpc8xxx_spi->done); if (mpc8xxx_spi->flags & SPI_CPM_MODE) - ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); else - ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len); + ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); if (ret) return ret; wait_for_completion(&mpc8xxx_spi->done); /* disable rx ints */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); if (mpc8xxx_spi->flags & SPI_CPM_MODE) - mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi); + fsl_spi_cpm_bufs_complete(mpc8xxx_spi); return mpc8xxx_spi->count; } -static void mpc8xxx_spi_do_one_msg(struct spi_message *m) +static void fsl_spi_do_one_msg(struct spi_message *m) { struct spi_device *spi = m->spi; struct spi_transfer *t; @@ -562,18 +483,18 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) status = -EINVAL; if (cs_change) - status = mpc8xxx_spi_setup_transfer(spi, t); + status = fsl_spi_setup_transfer(spi, t); if (status < 0) break; } if (cs_change) { - mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); ndelay(nsecs); } cs_change = t->cs_change; if (t->len) - status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped); + status = fsl_spi_bufs(spi, t, m->is_dma_mapped); if (status) { status = -EMSGSIZE; break; @@ -585,7 +506,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) if (cs_change) { ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); ndelay(nsecs); } } @@ -595,35 +516,16 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) if (status || !cs_change) { ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); } - mpc8xxx_spi_setup_transfer(spi, NULL); + fsl_spi_setup_transfer(spi, NULL); } -static void mpc8xxx_spi_work(struct work_struct *work) -{ - struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, - work); - - spin_lock_irq(&mpc8xxx_spi->lock); - while (!list_empty(&mpc8xxx_spi->queue)) { - struct spi_message *m = container_of(mpc8xxx_spi->queue.next, - struct spi_message, queue); - - list_del_init(&m->queue); - spin_unlock_irq(&mpc8xxx_spi->lock); - - mpc8xxx_spi_do_one_msg(m); - - spin_lock_irq(&mpc8xxx_spi->lock); - } - spin_unlock_irq(&mpc8xxx_spi->lock); -} - -static int mpc8xxx_spi_setup(struct spi_device *spi) +static int fsl_spi_setup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; int retval; u32 hw_mode; struct spi_mpc8xxx_cs *cs = 
spi->controller_state; @@ -639,8 +541,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) } mpc8xxx_spi = spi_master_get_devdata(spi->master); - hw_mode = cs->hw_mode; /* Save orginal settings */ - cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save original settings */ + cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); /* mask out bits we are going to set */ cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH | SPMODE_REV | SPMODE_LOOP); @@ -654,7 +558,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) if (spi->mode & SPI_LOOP) cs->hw_mode |= SPMODE_LOOP; - retval = mpc8xxx_spi_setup_transfer(spi, NULL); + retval = fsl_spi_setup_transfer(spi, NULL); if (retval < 0) { cs->hw_mode = hw_mode; /* Restore settings */ return retval; @@ -662,9 +566,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) return 0; } -static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) +static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { u16 len; + struct fsl_spi_reg *reg_base = mspi->reg_base; dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); @@ -676,20 +581,22 @@ static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) } /* Clear the events */ - mpc8xxx_spi_write_reg(&mspi->base->event, events); + mpc8xxx_spi_write_reg(®_base->event, events); mspi->count -= len; if (mspi->count) - mpc8xxx_spi_cpm_bufs_start(mspi); + fsl_spi_cpm_bufs_start(mspi); else complete(&mspi->done); } -static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) { + struct fsl_spi_reg *reg_base = mspi->reg_base; + /* We need handle RX first */ if (events & SPIE_NE) { - u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive); + u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); if (mspi->rx) mspi->get_rx(rx_data, mspi); @@ -698,105 +605,83 @@ static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) if ((events & SPIE_NF) == 0) /* spin until TX is done */ while (((events = - mpc8xxx_spi_read_reg(&mspi->base->event)) & + mpc8xxx_spi_read_reg(®_base->event)) & SPIE_NF) == 0) cpu_relax(); /* Clear the events */ - mpc8xxx_spi_write_reg(&mspi->base->event, events); + mpc8xxx_spi_write_reg(®_base->event, events); mspi->count -= 1; if (mspi->count) { u32 word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); + mpc8xxx_spi_write_reg(®_base->transmit, word); } else { complete(&mspi->done); } } -static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) +static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) { struct mpc8xxx_spi *mspi = context_data; irqreturn_t ret = IRQ_NONE; u32 events; + struct fsl_spi_reg *reg_base = mspi->reg_base; /* Get interrupt events(tx/rx) */ - events = mpc8xxx_spi_read_reg(&mspi->base->event); + events = mpc8xxx_spi_read_reg(®_base->event); if (events) ret = IRQ_HANDLED; dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); if (mspi->flags & SPI_CPM_MODE) - mpc8xxx_spi_cpm_irq(mspi, events); + fsl_spi_cpm_irq(mspi, events); else - mpc8xxx_spi_cpu_irq(mspi, events); + fsl_spi_cpu_irq(mspi, events); return ret; } -static int mpc8xxx_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - 
spin_lock_irqsave(&mpc8xxx_spi->lock, flags); - list_add_tail(&m->queue, &mpc8xxx_spi->queue); - queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); - spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); - - return 0; -} - - -static void mpc8xxx_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static void *mpc8xxx_spi_alloc_dummy_rx(void) +static void *fsl_spi_alloc_dummy_rx(void) { - mutex_lock(&mpc8xxx_dummy_rx_lock); + mutex_lock(&fsl_dummy_rx_lock); - if (!mpc8xxx_dummy_rx) - mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); - if (mpc8xxx_dummy_rx) - mpc8xxx_dummy_rx_refcnt++; + if (!fsl_dummy_rx) + fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); + if (fsl_dummy_rx) + fsl_dummy_rx_refcnt++; - mutex_unlock(&mpc8xxx_dummy_rx_lock); + mutex_unlock(&fsl_dummy_rx_lock); - return mpc8xxx_dummy_rx; + return fsl_dummy_rx; } -static void mpc8xxx_spi_free_dummy_rx(void) +static void fsl_spi_free_dummy_rx(void) { - mutex_lock(&mpc8xxx_dummy_rx_lock); + mutex_lock(&fsl_dummy_rx_lock); - switch (mpc8xxx_dummy_rx_refcnt) { + switch (fsl_dummy_rx_refcnt) { case 0: WARN_ON(1); break; case 1: - kfree(mpc8xxx_dummy_rx); - mpc8xxx_dummy_rx = NULL; + kfree(fsl_dummy_rx); + fsl_dummy_rx = NULL; /* fall through */ default: - mpc8xxx_dummy_rx_refcnt--; + fsl_dummy_rx_refcnt--; break; } - mutex_unlock(&mpc8xxx_dummy_rx_lock); + mutex_unlock(&fsl_dummy_rx_lock); } -static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) +static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; const u32 *iprop; int size; unsigned long spi_base_ofs; @@ -847,10 +732,10 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) return pram_ofs; } -static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) +static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; const u32 *iprop; int size; unsigned long pram_ofs; @@ -859,7 +744,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) if (!(mspi->flags & SPI_CPM_MODE)) return 0; - if (!mpc8xxx_spi_alloc_dummy_rx()) + if (!fsl_spi_alloc_dummy_rx()) return -ENOMEM; if (mspi->flags & SPI_QE) { @@ -880,7 +765,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) } } - pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi); + pram_ofs = fsl_spi_cpm_get_pram(mspi); if (IS_ERR_VALUE(pram_ofs)) { dev_err(dev, "can't allocate spi parameter ram\n"); goto err_pram; @@ -900,7 +785,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) goto err_dummy_tx; } - mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR, + mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { dev_err(dev, "unable to map dummy rx buffer\n"); @@ -938,11 +823,11 @@ err_dummy_tx: err_bds: cpm_muram_free(pram_ofs); err_pram: - mpc8xxx_spi_free_dummy_rx(); + fsl_spi_free_dummy_rx(); return -ENOMEM; } -static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; @@ -950,30 +835,22 @@ static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); 
cpm_muram_free(cpm_muram_offset(mspi->pram)); - mpc8xxx_spi_free_dummy_rx(); + fsl_spi_free_dummy_rx(); } -static const char *mpc8xxx_spi_strmode(unsigned int flags) +static void fsl_spi_remove(struct mpc8xxx_spi *mspi) { - if (flags & SPI_QE_CPU_MODE) { - return "QE CPU"; - } else if (flags & SPI_CPM_MODE) { - if (flags & SPI_QE) - return "QE"; - else if (flags & SPI_CPM2) - return "CPM2"; - else - return "CPM1"; - } - return "CPU"; + iounmap(mspi->reg_base); + fsl_spi_cpm_free(mspi); } -static struct spi_master * __devinit -mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) +static struct spi_master * __devinit fsl_spi_probe(struct device *dev, + struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; u32 regval; int ret = 0; @@ -985,131 +862,77 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) dev_set_drvdata(dev, master); - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH - | SPI_LSB_FIRST | SPI_LOOP; + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; - master->setup = mpc8xxx_spi_setup; - master->transfer = mpc8xxx_spi_transfer; - master->cleanup = mpc8xxx_spi_cleanup; + master->setup = fsl_spi_setup; mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->dev = dev; - mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; - mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; - mpc8xxx_spi->flags = pdata->flags; - mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_spi_remove; + - ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi); + ret = fsl_spi_cpm_init(mpc8xxx_spi); if (ret) goto err_cpm_init; - mpc8xxx_spi->rx_shift = 0; - mpc8xxx_spi->tx_shift = 0; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { mpc8xxx_spi->rx_shift = 16; mpc8xxx_spi->tx_shift = 24; } - init_completion(&mpc8xxx_spi->done); - - mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem)); - if (mpc8xxx_spi->base == NULL) { + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (mpc8xxx_spi->reg_base == NULL) { ret = -ENOMEM; goto err_ioremap; } - mpc8xxx_spi->irq = irq; - /* Register for SPI Interrupt */ - ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, - 0, "mpc8xxx_spi", mpc8xxx_spi); + ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, + 0, "fsl_spi", mpc8xxx_spi); if (ret != 0) - goto unmap_io; + goto free_irq; - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; + reg_base = mpc8xxx_spi->reg_base; /* SPI controller initializations */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); /* Enable SPI interface */ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) regval |= SPMODE_OP; - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); - spin_lock_init(&mpc8xxx_spi->lock); - init_completion(&mpc8xxx_spi->done); - INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); - INIT_LIST_HEAD(&mpc8xxx_spi->queue); - - mpc8xxx_spi->workqueue = 
create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (mpc8xxx_spi->workqueue == NULL) { - ret = -EBUSY; - goto free_irq; - } + mpc8xxx_spi_write_reg(®_base->mode, regval); ret = spi_register_master(master); if (ret < 0) goto unreg_master; - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); return master; unreg_master: - destroy_workqueue(mpc8xxx_spi->workqueue); -free_irq: free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); -unmap_io: - iounmap(mpc8xxx_spi->base); +free_irq: + iounmap(mpc8xxx_spi->reg_base); err_ioremap: - mpc8xxx_spi_cpm_free(mpc8xxx_spi); + fsl_spi_cpm_free(mpc8xxx_spi); err_cpm_init: +err_probe: spi_master_put(master); err: return ERR_PTR(ret); } -static int __devexit mpc8xxx_spi_remove(struct device *dev) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct spi_master *master; - - master = dev_get_drvdata(dev); - mpc8xxx_spi = spi_master_get_devdata(master); - - flush_workqueue(mpc8xxx_spi->workqueue); - destroy_workqueue(mpc8xxx_spi->workqueue); - spi_unregister_master(master); - - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); - iounmap(mpc8xxx_spi->base); - mpc8xxx_spi_cpm_free(mpc8xxx_spi); - - return 0; -} - -struct mpc8xxx_spi_probe_info { - struct fsl_spi_platform_data pdata; - int *gpios; - bool *alow_flags; -}; - -static struct mpc8xxx_spi_probe_info * -to_of_pinfo(struct fsl_spi_platform_data *pdata) -{ - return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); -} - -static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) +static void fsl_spi_cs_control(struct spi_device *spi, bool on) { struct device *dev = spi->dev.parent; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); @@ -1120,9 +943,9 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) gpio_set_value(gpio, on ^ alow); } -static int of_mpc8xxx_spi_get_chipselects(struct device *dev) +static int of_fsl_spi_get_chipselects(struct device *dev) { - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; struct fsl_spi_platform_data *pdata = dev->platform_data; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); unsigned int ngpios; @@ -1181,7 +1004,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev) } pdata->max_chipselect = ngpios; - pdata->cs_control = mpc8xxx_spi_cs_control; + pdata->cs_control = fsl_spi_cs_control; return 0; @@ -1200,7 +1023,7 @@ err_alloc_flags: return ret; } -static int of_mpc8xxx_spi_free_chipselects(struct device *dev) +static int of_fsl_spi_free_chipselects(struct device *dev) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); @@ -1219,50 +1042,21 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev) return 0; } -static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, - const struct of_device_id *ofid) +static int __devinit of_fsl_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) { struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->node; - struct mpc8xxx_spi_probe_info *pinfo; - struct fsl_spi_platform_data *pdata; + struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; struct resource irq; - const void *prop; int ret = -ENOMEM; - pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; - - pdata = &pinfo->pdata; - dev->platform_data = 
pdata; - - /* Allocate bus num dynamically. */ - pdata->bus_num = -1; - - /* SPI controller is either clocked from QE or SoC clock. */ - pdata->sysclk = get_brgfreq(); - if (pdata->sysclk == -1) { - pdata->sysclk = fsl_get_sys_freq(); - if (pdata->sysclk == -1) { - ret = -ENODEV; - goto err_clk; - } - } + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; - prop = of_get_property(np, "mode", NULL); - if (prop && !strcmp(prop, "cpu-qe")) - pdata->flags = SPI_QE_CPU_MODE; - else if (prop && !strcmp(prop, "qe")) - pdata->flags = SPI_CPM_MODE | SPI_QE; - else if (of_device_is_compatible(np, "fsl,cpm2-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM2; - else if (of_device_is_compatible(np, "fsl,cpm1-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM1; - - ret = of_mpc8xxx_spi_get_chipselects(dev); + ret = of_fsl_spi_get_chipselects(dev); if (ret) goto err; @@ -1276,50 +1070,49 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, goto err; } - master = mpc8xxx_spi_probe(dev, &mem, irq.start); + master = fsl_spi_probe(dev, &mem, irq.start); if (IS_ERR(master)) { ret = PTR_ERR(master); goto err; } - of_register_spi_devices(master, np); - return 0; err: - of_mpc8xxx_spi_free_chipselects(dev); -err_clk: - kfree(pinfo); + of_fsl_spi_free_chipselects(dev); return ret; } -static int __devexit of_mpc8xxx_spi_remove(struct of_device *ofdev) +static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) { int ret; ret = mpc8xxx_spi_remove(&ofdev->dev); if (ret) return ret; - of_mpc8xxx_spi_free_chipselects(&ofdev->dev); + of_fsl_spi_free_chipselects(&ofdev->dev); return 0; } -static const struct of_device_id of_mpc8xxx_spi_match[] = { +static const struct of_device_id of_fsl_spi_match[] = { { .compatible = "fsl,spi" }, - {}, + {} }; -MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); +MODULE_DEVICE_TABLE(of, of_fsl_spi_match); -static struct of_platform_driver of_mpc8xxx_spi_driver = { - .name = "mpc8xxx_spi", - .match_table = of_mpc8xxx_spi_match, - .probe = of_mpc8xxx_spi_probe, - .remove = __devexit_p(of_mpc8xxx_spi_remove), +static struct of_platform_driver of_fsl_spi_driver = { + .driver = { + .name = "fsl_spi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_spi_match, + }, + .probe = of_fsl_spi_probe, + .remove = __devexit_p(of_fsl_spi_remove), }; #ifdef CONFIG_MPC832x_RDB /* - * XXX XXX XXX + * XXX XXX XXX * This is "legacy" platform driver, was used by the MPC8323E-RDB boards * only. The driver should go away soon, since newer MPC8323E-RDB's device * tree can work with OpenFirmware driver. 
But for now we support old trees @@ -1328,7 +1121,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = { static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) { struct resource *mem; - unsigned int irq; + int irq; struct spi_master *master; if (!pdev->dev.platform_data) @@ -1339,10 +1132,10 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) return -EINVAL; irq = platform_get_irq(pdev, 0); - if (!irq) + if (irq <= 0) return -EINVAL; - master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); + master = fsl_spi_probe(&pdev->dev, mem, irq); if (IS_ERR(master)) return PTR_ERR(master); return 0; @@ -1381,21 +1174,20 @@ static void __init legacy_driver_register(void) {} static void __exit legacy_driver_unregister(void) {} #endif /* CONFIG_MPC832x_RDB */ -static int __init mpc8xxx_spi_init(void) +static int __init fsl_spi_init(void) { legacy_driver_register(); - return of_register_platform_driver(&of_mpc8xxx_spi_driver); + return of_register_platform_driver(&of_fsl_spi_driver); } +module_init(fsl_spi_init); -static void __exit mpc8xxx_spi_exit(void) +static void __exit fsl_spi_exit(void) { - of_unregister_platform_driver(&of_mpc8xxx_spi_driver); + of_unregister_platform_driver(&of_fsl_spi_driver); legacy_driver_unregister(); } - -module_init(mpc8xxx_spi_init); -module_exit(mpc8xxx_spi_exit); +module_exit(fsl_spi_exit); MODULE_AUTHOR("Kumar Gala"); -MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); +MODULE_DESCRIPTION("Simple Freescale SPI Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index 26bd03e6185..63e51b011d5 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -127,8 +127,7 @@ static inline int getmiso(const struct spi_device *spi) */ #define spidelay(nsecs) do {} while (0) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" /* * These functions can leverage inline expansion of GPIO calls to shrink @@ -147,25 +146,63 @@ static inline int getmiso(const struct spi_device *spi) static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + +/* + * These functions do not call setmosi or getmiso if respective flag + * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to + * call when such pin is not present or defined in the controller. + * A separate set of callbacks is defined to get highest possible + * speed in the generic case (when both MISO and MOSI lines are + * available), as optimiser will remove the checks when argument is + * constant. 
+ */ + +static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); } /*----------------------------------------------------------------------*/ @@ -233,19 +270,30 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) } static int __init -spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) +spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, + u16 *res_flags) { int value; /* NOTE: SPI_*_GPIO symbols may reference "pdata" */ - value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); - if (value) - goto done; + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) { + value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); + if (value) + goto done; + } else { + /* HW configuration without MOSI pin */ + *res_flags |= SPI_MASTER_NO_TX; + } - value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); - if (value) - goto free_mosi; + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) { + value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); + if (value) + goto free_mosi; + } else { + /* HW configuration without MISO pin */ + *res_flags |= SPI_MASTER_NO_RX; + } value = spi_gpio_alloc(SPI_SCK_GPIO, label, false); if (value) @@ -254,9 +302,11 @@ spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) goto done; free_miso: - gpio_free(SPI_MISO_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); free_mosi: - gpio_free(SPI_MOSI_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); done: return value; } @@ -267,6 +317,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) struct spi_master *master; struct spi_gpio *spi_gpio; struct spi_gpio_platform_data *pdata; + u16 master_flags = 0; pdata = pdev->dev.platform_data; #ifdef GENERIC_BITBANG @@ -274,7 +325,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) return -ENODEV; #endif - status = spi_gpio_request(pdata, dev_name(&pdev->dev)); + status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); if (status < 0) return status; @@ -290,6 +341,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (pdata) spi_gpio->pdata = *pdata; + master->flags = master_flags; master->bus_num = pdev->id; master->num_chipselect = SPI_N_CHIPSEL; master->setup = spi_gpio_setup; @@ -297,10 +349,18 @@ static int __init spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.master = spi_master_get(master); spi_gpio->bitbang.chipselect = spi_gpio_chipselect; - spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; - spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; - spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; - spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + + if 
((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + } else { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; + } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; spi_gpio->bitbang.flags = SPI_CS_HIGH; @@ -308,8 +368,10 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (status < 0) { spi_master_put(spi_gpio->bitbang.master); gpio_free: - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); spi_master_put(master); } @@ -332,8 +394,10 @@ static int __exit spi_gpio_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); return status; diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 1893f1e96dc..55a38e2c6c1 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -30,6 +30,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/types.h> @@ -55,7 +56,28 @@ struct spi_imx_config { unsigned int speed_hz; unsigned int bpw; unsigned int mode; - int cs; + u8 cs; +}; + +enum spi_imx_devtype { + SPI_IMX_VER_IMX1, + SPI_IMX_VER_0_0, + SPI_IMX_VER_0_4, + SPI_IMX_VER_0_5, + SPI_IMX_VER_0_7, + SPI_IMX_VER_2_3, + SPI_IMX_VER_AUTODETECT, +}; + +struct spi_imx_data; + +struct spi_imx_devtype_data { + void (*intctrl)(struct spi_imx_data *, int); + int (*config)(struct spi_imx_data *, struct spi_imx_config *); + void (*trigger)(struct spi_imx_data *); + int (*rx_available)(struct spi_imx_data *); + void (*reset)(struct spi_imx_data *); + unsigned int fifosize; }; struct spi_imx_data { @@ -75,11 +97,7 @@ struct spi_imx_data { const void *tx_buf; unsigned int txfifo; /* number of words pushed in tx FIFO */ - /* SoC specific functions */ - void (*intctrl)(struct spi_imx_data *, int); - int (*config)(struct spi_imx_data *, struct spi_imx_config *); - void (*trigger)(struct spi_imx_data *); - int (*rx_available)(struct spi_imx_data *); + struct spi_imx_devtype_data devtype_data; }; #define MXC_SPI_BUF_RX(type) \ @@ -139,7 +157,7 @@ static unsigned int spi_imx_clkdiv_1(unsigned int fin, return max; } -/* MX1, MX31, MX35 */ +/* MX1, MX31, MX35, MX51 CSPI */ static unsigned int spi_imx_clkdiv_2(unsigned int fin, unsigned int fspi) { @@ -154,6 +172,128 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, return 7; } +#define SPI_IMX2_3_CTRL 0x08 +#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) +#define SPI_IMX2_3_CTRL_XCH (1 << 2) +#define SPI_IMX2_3_CTRL_MODE(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 +#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 +#define SPI_IMX2_3_CTRL_CS(cs) ((cs) 
<< 18) +#define SPI_IMX2_3_CTRL_BL_OFFSET 20 + +#define SPI_IMX2_3_CONFIG 0x0c +#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) +#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) +#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) + +#define SPI_IMX2_3_INT 0x10 +#define SPI_IMX2_3_INT_TEEN (1 << 0) +#define SPI_IMX2_3_INT_RREN (1 << 3) + +#define SPI_IMX2_3_STAT 0x18 +#define SPI_IMX2_3_STAT_RR (1 << 3) + +/* MX51 eCSPI */ +static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) +{ + /* + * there are two 4-bit dividers, the pre-divider divides by + * $pre, the post-divider by 2^$post + */ + unsigned int pre, post; + + if (unlikely(fspi > fin)) + return 0; + + post = fls(fin) - fls(fspi); + if (fin > fspi << post) + post++; + + /* now we have: (fin <= fspi << post) with post being minimal */ + + post = max(4U, post) - 4; + if (unlikely(post > 0xf)) { + pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", + __func__, fspi, fin); + return 0xff; + } + + pre = DIV_ROUND_UP(fin, fspi << post) - 1; + + pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", + __func__, fin, fspi, post, pre); + return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | + (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); +} + +static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned val = 0; + + if (enable & MXC_INT_TE) + val |= SPI_IMX2_3_INT_TEEN; + + if (enable & MXC_INT_RR) + val |= SPI_IMX2_3_INT_RREN; + + writel(val, spi_imx->base + SPI_IMX2_3_INT); +} + +static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) +{ + u32 reg; + + reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); + reg |= SPI_IMX2_3_CTRL_XCH; + writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); +} + +static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; + + /* set master mode */ + ctrl |= SPI_IMX2_3_CTRL_MODE(config->cs); + + /* set clock speed */ + ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); + + /* set chip select to use */ + ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); + + ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; + + cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); + + if (config->mode & SPI_CPHA) + cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); + + if (config->mode & SPI_CPOL) + cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); + + if (config->mode & SPI_CS_HIGH) + cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); + + writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); + writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); + + return 0; +} + +static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; +} + +static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (spi_imx2_3_rx_available(spi_imx)) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + #define MX31_INTREG_TEEN (1 << 0) #define MX31_INTREG_RREN (1 << 3) @@ -177,7 +317,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, * the i.MX35 has a slightly different register layout for bits * we do not use here. 
*/ -static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -189,7 +329,7 @@ static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx31_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; @@ -198,20 +338,16 @@ static void mx31_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx31_config(struct spi_imx_data *spi_imx, +static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX31_CSPICTRL_DR_SHIFT; - if (cpu_is_mx31()) - reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; - else if (cpu_is_mx25() || cpu_is_mx35()) { - reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; - reg |= MX31_CSPICTRL_SSCTL; - } + reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; if (config->mode & SPI_CPHA) reg |= MX31_CSPICTRL_PHA; @@ -219,23 +355,52 @@ static int mx31_config(struct spi_imx_data *spi_imx, reg |= MX31_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX31_CSPICTRL_SSPOL; - if (config->cs < 0) { - if (cpu_is_mx31()) - reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT; - else if (cpu_is_mx25() || cpu_is_mx35()) - reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; - } + if (cs < 0) + reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; + + reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; + reg |= MX31_CSPICTRL_SSCTL; + + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx31_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; } +static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + #define MX27_INTREG_RR (1 << 4) #define MX27_INTREG_TEEN (1 << 9) #define MX27_INTREG_RREN (1 << 13) @@ -249,7 +414,7 @@ static int mx31_rx_available(struct spi_imx_data *spi_imx) #define MX27_CSPICTRL_DR_SHIFT 14 #define MX27_CSPICTRL_CS_SHIFT 19 -static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -261,7 +426,7 @@ static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx27_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) { unsigned int 
reg; @@ -270,10 +435,11 @@ static void mx27_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx27_config(struct spi_imx_data *spi_imx, +static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << MX27_CSPICTRL_DR_SHIFT; @@ -285,19 +451,24 @@ static int mx27_config(struct spi_imx_data *spi_imx, reg |= MX27_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX27_CSPICTRL_SSPOL; - if (config->cs < 0) - reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; + if (cs < 0) + reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx27_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; } +static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + #define MX1_INTREG_RR (1 << 3) #define MX1_INTREG_TEEN (1 << 8) #define MX1_INTREG_RREN (1 << 11) @@ -309,7 +480,7 @@ static int mx27_rx_available(struct spi_imx_data *spi_imx) #define MX1_CSPICTRL_MASTER (1 << 10) #define MX1_CSPICTRL_DR_SHIFT 13 -static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -321,7 +492,7 @@ static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx1_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; @@ -330,7 +501,7 @@ static void mx1_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx1_config(struct spi_imx_data *spi_imx, +static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; @@ -349,11 +520,73 @@ static int mx1_config(struct spi_imx_data *spi_imx, return 0; } -static int mx1_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; } +static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + +/* + * These version numbers are taken from the Freescale driver. Unfortunately it + * doesn't support i.MX1, so this entry doesn't match the scheme. 
:-( + */ +static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { +#ifdef CONFIG_SPI_IMX_VER_IMX1 + [SPI_IMX_VER_IMX1] = { + .intctrl = mx1_intctrl, + .config = mx1_config, + .trigger = mx1_trigger, + .rx_available = mx1_rx_available, + .reset = mx1_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_0 + [SPI_IMX_VER_0_0] = { + .intctrl = mx27_intctrl, + .config = mx27_config, + .trigger = mx27_trigger, + .rx_available = mx27_rx_available, + .reset = spi_imx0_0_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_4 + [SPI_IMX_VER_0_4] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_4_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_7 + [SPI_IMX_VER_0_7] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_7_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_2_3 + [SPI_IMX_VER_2_3] = { + .intctrl = spi_imx2_3_intctrl, + .config = spi_imx2_3_config, + .trigger = spi_imx2_3_trigger, + .rx_available = spi_imx2_3_rx_available, + .reset = spi_imx2_3_reset, + .fifosize = 64, + }, +#endif +}; + static void spi_imx_chipselect(struct spi_device *spi, int is_active) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -369,21 +602,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active) static void spi_imx_push(struct spi_imx_data *spi_imx) { - while (spi_imx->txfifo < 8) { + while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { if (!spi_imx->count) break; spi_imx->tx(spi_imx); spi_imx->txfifo++; } - spi_imx->trigger(spi_imx); + spi_imx->devtype_data.trigger(spi_imx); } static irqreturn_t spi_imx_isr(int irq, void *dev_id) { struct spi_imx_data *spi_imx = dev_id; - while (spi_imx->rx_available(spi_imx)) { + while (spi_imx->devtype_data.rx_available(spi_imx)) { spi_imx->rx(spi_imx); spi_imx->txfifo--; } @@ -397,11 +630,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id) /* No data left to push, but still waiting for rx data, * enable receive data available interrupt. */ - spi_imx->intctrl(spi_imx, MXC_INT_RR); + spi_imx->devtype_data.intctrl( + spi_imx, MXC_INT_RR); return IRQ_HANDLED; } - spi_imx->intctrl(spi_imx, 0); + spi_imx->devtype_data.intctrl(spi_imx, 0); complete(&spi_imx->xfer_done); return IRQ_HANDLED; @@ -416,7 +650,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, config.bpw = t ? t->bits_per_word : spi->bits_per_word; config.speed_hz = t ? 
t->speed_hz : spi->max_speed_hz; config.mode = spi->mode; - config.cs = spi_imx->chipselect[spi->chip_select]; + config.cs = spi->chip_select; if (!config.speed_hz) config.speed_hz = spi->max_speed_hz; @@ -438,7 +672,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, } else BUG(); - spi_imx->config(spi_imx, &config); + spi_imx->devtype_data.config(spi_imx, &config); return 0; } @@ -457,7 +691,7 @@ static int spi_imx_transfer(struct spi_device *spi, spi_imx_push(spi_imx); - spi_imx->intctrl(spi_imx, MXC_INT_TE); + spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); wait_for_completion(&spi_imx->xfer_done); @@ -469,7 +703,7 @@ static int spi_imx_setup(struct spi_device *spi) struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); int gpio = spi_imx->chipselect[spi->chip_select]; - pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, spi->mode, spi->bits_per_word, spi->max_speed_hz); if (gpio >= 0) @@ -484,6 +718,39 @@ static void spi_imx_cleanup(struct spi_device *spi) { } +static struct platform_device_id spi_imx_devtype[] = { + { + .name = DRIVER_NAME, + .driver_data = SPI_IMX_VER_AUTODETECT, + }, { + .name = "imx1-cspi", + .driver_data = SPI_IMX_VER_IMX1, + }, { + .name = "imx21-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx25-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx27-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx31-cspi", + .driver_data = SPI_IMX_VER_0_4, + }, { + .name = "imx35-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-ecspi", + .driver_data = SPI_IMX_VER_2_3, + }, { + /* sentinel */ + } +}; + static int __devinit spi_imx_probe(struct platform_device *pdev) { struct spi_imx_master *mxc_platform_info; @@ -535,6 +802,31 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) init_completion(&spi_imx->xfer_done); + if (pdev->id_entry->driver_data == SPI_IMX_VER_AUTODETECT) { + if (cpu_is_mx25() || cpu_is_mx35()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_7]; + else if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_4]; + else if (cpu_is_mx27() || cpu_is_mx21()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_0]; + else if (cpu_is_mx1()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_IMX1]; + else + BUG(); + } else + spi_imx->devtype_data = + spi_imx_devtype_data[pdev->id_entry->driver_data]; + + if (!spi_imx->devtype_data.intctrl) { + dev_err(&pdev->dev, "no support for this device compiled in\n"); + ret = -ENODEV; + goto out_gpio_free; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "can't get platform resource\n"); @@ -566,24 +858,6 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) goto out_iounmap; } - if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) { - spi_imx->intctrl = mx31_intctrl; - spi_imx->config = mx31_config; - spi_imx->trigger = mx31_trigger; - spi_imx->rx_available = mx31_rx_available; - } else if (cpu_is_mx27() || cpu_is_mx21()) { - spi_imx->intctrl = mx27_intctrl; - spi_imx->config = mx27_config; - spi_imx->trigger = mx27_trigger; - spi_imx->rx_available = mx27_rx_available; - } else if (cpu_is_mx1()) { - spi_imx->intctrl = mx1_intctrl; - spi_imx->config = mx1_config; - spi_imx->trigger = mx1_trigger; - spi_imx->rx_available = mx1_rx_available; - } else - BUG(); - 
spi_imx->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(spi_imx->clk)) { dev_err(&pdev->dev, "unable to get clock\n"); @@ -594,15 +868,9 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) clk_enable(spi_imx->clk); spi_imx->spi_clk = clk_get_rate(spi_imx->clk); - if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27()) - writel(1, spi_imx->base + MXC_RESET); - - /* drain receive buffer */ - if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) - while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) - readl(spi_imx->base + MXC_CSPIRXDATA); + spi_imx->devtype_data.reset(spi_imx); - spi_imx->intctrl(spi_imx, 0); + spi_imx->devtype_data.intctrl(spi_imx, 0); ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { @@ -667,6 +935,7 @@ static struct platform_driver spi_imx_driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, + .id_table = spi_imx_devtype, .probe = spi_imx_probe, .remove = __devexit_p(spi_imx_remove), }; diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c index 568c781ad91..7746a41ab6d 100644 --- a/drivers/spi/spi_lm70llp.c +++ b/drivers/spi/spi_lm70llp.c @@ -174,8 +174,7 @@ static inline int getmiso(struct spi_device *s) } /*--------------------------------------------------------------------*/ -#define EXPAND_BITBANG_TXRX 1 -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static void lm70_chipselect(struct spi_device *spi, int value) { @@ -192,7 +191,7 @@ static void lm70_chipselect(struct spi_device *spi, int value) */ static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static void spi_lm70llp_attach(struct parport *p) diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c index b319f9bf9b9..dff63be0d0a 100644 --- a/drivers/spi/spi_nuc900.c +++ b/drivers/spi/spi_nuc900.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 140a18d6cf3..80e172d3e72 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/of_platform.h> @@ -387,16 +388,16 @@ static void free_gpios(struct ppc4xx_spi *hw) } /* - * of_device layer stuff... + * platform_device layer stuff... 
*/ -static int __init spi_ppc4xx_of_probe(struct of_device *op, +static int __init spi_ppc4xx_of_probe(struct platform_device *op, const struct of_device_id *match) { struct ppc4xx_spi *hw; struct spi_master *master; struct spi_bitbang *bbp; struct resource resource; - struct device_node *np = op->node; + struct device_node *np = op->dev.of_node; struct device *dev = &op->dev; struct device_node *opbnp; int ret; @@ -406,6 +407,7 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op, master = spi_alloc_master(dev, sizeof *hw); if (master == NULL) return -ENOMEM; + master->dev.of_node = np; dev_set_drvdata(dev, master); hw = spi_master_get_devdata(master); hw->master = spi_master_get(master); @@ -544,7 +546,6 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op, } dev_info(dev, "driver initialized\n"); - of_register_spi_devices(master, np); return 0; @@ -564,7 +565,7 @@ free_master: return ret; } -static int __exit spi_ppc4xx_of_remove(struct of_device *op) +static int __exit spi_ppc4xx_of_remove(struct platform_device *op) { struct spi_master *master = dev_get_drvdata(&op->dev); struct ppc4xx_spi *hw = spi_master_get_devdata(master); @@ -578,7 +579,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op) return 0; } -static struct of_device_id spi_ppc4xx_of_match[] = { +static const struct of_device_id spi_ppc4xx_of_match[] = { { .compatible = "ibm,ppc4xx-spi", }, {}, }; @@ -586,12 +587,12 @@ static struct of_device_id spi_ppc4xx_of_match[] = { MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); static struct of_platform_driver spi_ppc4xx_of_driver = { - .match_table = spi_ppc4xx_of_match, .probe = spi_ppc4xx_of_probe, .remove = __exit_p(spi_ppc4xx_of_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .of_match_table = spi_ppc4xx_of_match, }, }; diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index c010733877a..151a95e4065 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -275,7 +276,7 @@ static inline u32 ack_bit(unsigned int irq) * Claim the FIQ handler (only one can be active at any one time) and * then setup the correct transfer code for this transfer. * - * This call updates all the necessary state information if sucessful, + * This call updates all the necessary state information if successful, * so the caller does not need to do anything more than start the transfer * as normal, since the IRQ will have been re-routed to the FIQ handler. 
*/ diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c index bbf9371cd28..be991359bf9 100644 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ b/drivers/spi/spi_s3c24xx_gpio.c @@ -58,32 +58,31 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 88a456dba96..795828b90f4 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c @@ -28,7 +28,7 @@ #include <linux/spi/spi.h> #include <mach/dma.h> -#include <plat/spi.h> +#include <plat/s3c64xx-spi.h> /* Registers and bit-fields */ @@ -137,6 +137,7 @@ /** * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. * @clk: Pointer to the spi clock. + * @src_clk: Pointer to the clock used to generate SPI signals. * @master: Pointer to the SPI Protocol master. * @workqueue: Work queue for the SPI xfer requests. * @cntrlr_info: Platform specific data for the controller this driver manages. 
@@ -157,10 +158,11 @@ struct s3c64xx_spi_driver_data { void __iomem *regs; struct clk *clk; + struct clk *src_clk; struct platform_device *pdev; struct spi_master *master; struct workqueue_struct *workqueue; - struct s3c64xx_spi_cntrlr_info *cntrlr_info; + struct s3c64xx_spi_info *cntrlr_info; struct spi_device *tgl_spi; struct work_struct work; struct list_head queue; @@ -180,7 +182,7 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = { static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; unsigned long loops; u32 val; @@ -198,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) val = readl(regs + S3C64XX_SPI_STATUS); } while (TX_FIFO_LVL(val, sci) && loops--); + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); + /* Flush RxFIFO*/ loops = msecs_to_loops(1); do { @@ -208,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) break; } while (loops--); + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); + val = readl(regs + S3C64XX_SPI_CH_CFG); val &= ~S3C64XX_SPI_CH_SW_RST; writel(val, regs + S3C64XX_SPI_CH_CFG); @@ -225,7 +233,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, struct spi_device *spi, struct spi_transfer *xfer, int dma_mode) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; u32 modecfg, chcfg; @@ -253,15 +261,25 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - s3c2410_dma_config(sdd->tx_dmach, 1); + s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, xfer->tx_dma, xfer->len); s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); } else { - unsigned char *buf = (unsigned char *) xfer->tx_buf; - int i = 0; - while (i < xfer->len) - writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA); + switch (sdd->cur_bpw) { + case 32: + iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 4); + break; + case 16: + iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 2); + break; + default: + iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len); + break; + } } } @@ -278,7 +296,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); - s3c2410_dma_config(sdd->rx_dmach, 1); + s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, xfer->rx_dma, xfer->len); s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); @@ -298,35 +316,37 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ /* Deselect the last toggled device */ cs = sdd->tgl_spi->controller_data; - cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); + cs->set_level(cs->line, + spi->mode & SPI_CS_HIGH ? 0 : 1); } sdd->tgl_spi = NULL; } cs = spi->controller_data; - cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0); + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 
1 : 0); } static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, struct spi_transfer *xfer, int dma_mode) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; unsigned long val; int ms; /* millisecs to xfer 'len' bytes @ 'cur_speed' */ ms = xfer->len * 8 * 1000 / sdd->cur_speed; - ms += 5; /* some tolerance */ + ms += 10; /* some tolerance */ if (dma_mode) { val = msecs_to_jiffies(ms) + 10; val = wait_for_completion_timeout(&sdd->xfer_completion, val); } else { + u32 status; val = msecs_to_loops(ms); do { - val = readl(regs + S3C64XX_SPI_STATUS); - } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); + status = readl(regs + S3C64XX_SPI_STATUS); + } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); } if (!val) @@ -356,20 +376,26 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, return -EIO; } } else { - unsigned char *buf; - int i; - /* If it was only Tx */ if (xfer->rx_buf == NULL) { sdd->state &= ~TXBUSY; return 0; } - i = 0; - buf = xfer->rx_buf; - while (i < xfer->len) - buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA); - + switch (sdd->cur_bpw) { + case 32: + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 4); + break; + case 16: + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 2); + break; + default: + ioread8_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len); + break; + } sdd->state &= ~RXBUSY; } @@ -384,19 +410,23 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, if (sdd->tgl_spi == spi) sdd->tgl_spi = NULL; - cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); } static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; u32 val; /* Disable Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); + if (sci->clk_from_cmu) { + clk_disable(sdd->src_clk); + } else { + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } /* Set Polarity and Phase */ val = readl(regs + S3C64XX_SPI_CH_CFG); @@ -420,33 +450,43 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) switch (sdd->cur_bpw) { case 32: val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; break; case 16: val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; break; default: val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; + val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; break; } - val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */ writel(val, regs + S3C64XX_SPI_MODE_CFG); - /* Configure Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_PSR_MASK; - val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1) - & S3C64XX_SPI_PSR_MASK); - writel(val, regs + S3C64XX_SPI_CLK_CFG); - - /* Enable Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val |= S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); + if (sci->clk_from_cmu) { + /* Configure Clock */ + /* There is half-multiplier before the SPI */ + clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + /* Enable Clock */ + clk_enable(sdd->src_clk); + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_PSR_MASK; 
+ val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + & S3C64XX_SPI_PSR_MASK); + writel(val, regs + S3C64XX_SPI_CLK_CFG); + + /* Enable Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } } -void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) +static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) { struct s3c64xx_spi_driver_data *sdd = buf_id; unsigned long flags; @@ -465,8 +505,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, spin_unlock_irqrestore(&sdd->lock, flags); } -void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) +static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) { struct s3c64xx_spi_driver_data *sdd = buf_id; unsigned long flags; @@ -490,6 +530,7 @@ void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, struct spi_message *msg) { + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; struct device *dev = &sdd->pdev->dev; struct spi_transfer *xfer; @@ -505,9 +546,13 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, /* Map until end or first fail */ list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + if (xfer->tx_buf != NULL) { - xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, - xfer->len, DMA_TO_DEVICE); + xfer->tx_dma = dma_map_single(dev, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) { dev_err(dev, "dma_map_single Tx failed\n"); xfer->tx_dma = XFER_DMAADDR_INVALID; @@ -535,6 +580,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, struct spi_message *msg) { + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; struct device *dev = &sdd->pdev->dev; struct spi_transfer *xfer; @@ -543,6 +589,9 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + if (xfer->rx_buf != NULL && xfer->rx_dma != XFER_DMAADDR_INVALID) dma_unmap_single(dev, xfer->rx_dma, @@ -558,7 +607,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, static void handle_msg(struct s3c64xx_spi_driver_data *sdd, struct spi_message *msg) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; struct spi_device *spi = msg->spi; struct s3c64xx_spi_csinfo *cs = spi->controller_data; struct spi_transfer *xfer; @@ -598,6 +647,14 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, bpw = xfer->bits_per_word ? : spi->bits_per_word; speed = xfer->speed_hz ? 
: spi->max_speed_hz; + if (xfer->len % (bpw / 8)) { + dev_err(&spi->dev, + "Xfer length(%u) not a multiple of word size(%u)\n", + xfer->len, bpw / 8); + status = -EIO; + goto out; + } + if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { sdd->cur_bpw = bpw; sdd->cur_speed = speed; @@ -632,8 +689,8 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, S3C64XX_SPI_DEACT(sdd); if (status) { - dev_err(&spi->dev, "I/O Error: \ - rx-%d tx-%d res:rx-%c tx-%c len-%d\n", + dev_err(&spi->dev, "I/O Error: " + "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, (sdd->state & RXBUSY) ? 'f' : 'p', (sdd->state & TXBUSY) ? 'f' : 'p', @@ -786,9 +843,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs = spi->controller_data; struct s3c64xx_spi_driver_data *sdd; - struct s3c64xx_spi_cntrlr_info *sci; + struct s3c64xx_spi_info *sci; struct spi_message *msg; - u32 psr, speed; unsigned long flags; int err = 0; @@ -831,32 +887,37 @@ static int s3c64xx_spi_setup(struct spi_device *spi) } /* Check if we can provide the requested rate */ - speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */ - - if (spi->max_speed_hz > speed) - spi->max_speed_hz = speed; - - psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1; - psr &= S3C64XX_SPI_PSR_MASK; - if (psr == S3C64XX_SPI_PSR_MASK) - psr--; + if (!sci->clk_from_cmu) { + u32 psr, speed; + + /* Max possible */ + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + + if (spi->max_speed_hz > speed) + spi->max_speed_hz = speed; + + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr &= S3C64XX_SPI_PSR_MASK; + if (psr == S3C64XX_SPI_PSR_MASK) + psr--; + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz < speed) { + if (psr+1 < S3C64XX_SPI_PSR_MASK) { + psr++; + } else { + err = -EINVAL; + goto setup_exit; + } + } - speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz < speed) { - if (psr+1 < S3C64XX_SPI_PSR_MASK) { - psr++; - } else { + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz >= speed) + spi->max_speed_hz = speed; + else err = -EINVAL; - goto setup_exit; - } } - speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz >= speed) - spi->max_speed_hz = speed; - else - err = -EINVAL; - setup_exit: /* setup() returns with device de-selected */ @@ -867,7 +928,7 @@ setup_exit: static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) { - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; unsigned int val; @@ -878,7 +939,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) /* Disable Interrupts - we use Polling if not DMA mode */ writel(0, regs + S3C64XX_SPI_INT_EN); - writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, + if (!sci->clk_from_cmu) + writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, regs + S3C64XX_SPI_CLK_CFG); writel(0, regs + S3C64XX_SPI_MODE_CFG); writel(0, regs + S3C64XX_SPI_PACKET_CNT); @@ -902,7 +964,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) { struct resource *mem_res, *dmatx_res, *dmarx_res; struct s3c64xx_spi_driver_data *sdd; - struct s3c64xx_spi_cntrlr_info *sci; + struct s3c64xx_spi_info *sci; struct spi_master *master; int ret; @@ -917,6 +979,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) return -ENODEV; } + sci = 
pdev->dev.platform_data; + if (!sci->src_clk_name) { + dev_err(&pdev->dev, + "Board init must call s3c64xx_spi_set_info()\n"); + return -EINVAL; + } + /* Check for availability of necessary resource */ dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); @@ -944,8 +1013,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) return -ENOMEM; } - sci = pdev->dev.platform_data; - platform_set_drvdata(pdev, master); sdd = spi_master_get_devdata(master); @@ -1000,18 +1067,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) goto err4; } - if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK) - sci->src_clk = sdd->clk; - else - sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name); - if (IS_ERR(sci->src_clk)) { + sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); + if (IS_ERR(sdd->src_clk)) { dev_err(&pdev->dev, "Unable to acquire clock '%s'\n", sci->src_clk_name); - ret = PTR_ERR(sci->src_clk); + ret = PTR_ERR(sdd->src_clk); goto err5; } - if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) { + if (clk_enable(sdd->src_clk)) { dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", sci->src_clk_name); ret = -EBUSY; @@ -1040,11 +1104,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) goto err8; } - dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \ - with %d Slaves attached\n", + dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " + "with %d Slaves attached\n", pdev->id, master->num_chipselect); - dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\ - \tDMA=[Rx-%d, Tx-%d]\n", + dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", mem_res->end, mem_res->start, sdd->rx_dmach, sdd->tx_dmach); @@ -1053,11 +1116,9 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) err8: destroy_workqueue(sdd->workqueue); err7: - if (sci->src_clk != sdd->clk) - clk_disable(sci->src_clk); + clk_disable(sdd->src_clk); err6: - if (sci->src_clk != sdd->clk) - clk_put(sci->src_clk); + clk_put(sdd->src_clk); err5: clk_disable(sdd->clk); err4: @@ -1078,7 +1139,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) { struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; struct resource *mem_res; unsigned long flags; @@ -1093,11 +1153,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) destroy_workqueue(sdd->workqueue); - if (sci->src_clk != sdd->clk) - clk_disable(sci->src_clk); - - if (sci->src_clk != sdd->clk) - clk_put(sci->src_clk); + clk_disable(sdd->src_clk); + clk_put(sdd->src_clk); clk_disable(sdd->clk); clk_put(sdd->clk); @@ -1105,7 +1162,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) iounmap((void *) sdd->regs); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem_res->start, resource_size(mem_res)); + if (mem_res != NULL) + release_mem_region(mem_res->start, resource_size(mem_res)); platform_set_drvdata(pdev, NULL); spi_master_put(master); @@ -1118,8 +1176,6 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) { struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; - struct s3c64xx_spi_csinfo *cs; unsigned long flags; spin_lock_irqsave(&sdd->lock, flags); @@ -1130,9 +1186,7 @@ static int s3c64xx_spi_suspend(struct 
platform_device *pdev, pm_message_t state) msleep(10); /* Disable the clock */ - if (sci->src_clk != sdd->clk) - clk_disable(sci->src_clk); - + clk_disable(sdd->src_clk); clk_disable(sdd->clk); sdd->cur_speed = 0; /* Output Clock is stopped */ @@ -1144,15 +1198,13 @@ static int s3c64xx_spi_resume(struct platform_device *pdev) { struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; unsigned long flags; sci->cfg_gpio(pdev); /* Enable the clock */ - if (sci->src_clk != sdd->clk) - clk_enable(sci->src_clk); - + clk_enable(sdd->src_clk); clk_enable(sdd->clk); s3c64xx_spi_hwinit(sdd, pdev->id); @@ -1183,7 +1235,7 @@ static int __init s3c64xx_spi_init(void) { return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); } -module_init(s3c64xx_spi_init); +subsys_initcall(s3c64xx_spi_init); static void __exit s3c64xx_spi_exit(void) { diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 51e5e1dfa6e..d93b66743ba 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c @@ -20,12 +20,12 @@ #include <linux/bitmap.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/err.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/spi/sh_msiof.h> -#include <asm/spi.h> #include <asm/unaligned.h> struct sh_msiof_spi_priv { @@ -173,15 +173,12 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, int edge; /* - * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!) - * 0 0 10 10 1 0 - * 0 1 10 10 0 1 - * 1 0 11 11 0 1 - * 1 1 11 11 1 0 - * - * (!) Note: REDG is inverted recommended data sheet setting + * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG + * 0 0 10 10 1 1 + * 0 1 10 10 0 0 + * 1 0 11 11 0 0 + * 1 1 11 11 1 1 */ - sh_msiof_write(p, FCTR, 0); sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); @@ -193,7 +190,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, edge = cpol ? cpha : !cpha; tmp |= edge << 27; /* TEDG */ - tmp |= !edge << 26; /* REDG */ + tmp |= edge << 26; /* REDG */ tmp |= (tx_hi_z ? 
2 : 0) << 22; /* TXDIZ */ sh_msiof_write(p, CTR, tmp); } diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c index a65c12ffa73..5c643916119 100644 --- a/drivers/spi/spi_sh_sci.c +++ b/drivers/spi/spi_sh_sci.c @@ -78,31 +78,30 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } static void sh_sci_spi_chipselect(struct spi_device *dev, int value) diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c index 2552bb36400..fadff76eb7e 100644 --- a/drivers/spi/spi_stmp.c +++ b/drivers/spi/spi_stmp.c @@ -76,7 +76,7 @@ struct stmp_spi { break; \ } \ cpu_relax(); \ - } while (time_before(end_jiffies, jiffies)); \ + } while (time_before(jiffies, end_jiffies)); \ succeeded; \ }) diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c new file mode 100644 index 00000000000..bb7df02a547 --- /dev/null +++ b/drivers/spi/spi_tegra.c @@ -0,0 +1,618 @@ +/* + * Driver for Nvidia TEGRA spi controller. + * + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Erik Gilling <konkers@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/delay.h> + +#include <linux/spi/spi.h> + +#include <mach/dma.h> + +#define SLINK_COMMAND 0x000 +#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) +#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) +#define SLINK_BOTH_EN (1 << 10) +#define SLINK_CS_SW (1 << 11) +#define SLINK_CS_VALUE (1 << 12) +#define SLINK_CS_POLARITY (1 << 13) +#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) +#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) +#define SLINK_IDLE_SDA_PULL_LOW (2 << 16) +#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) +#define SLINK_IDLE_SDA_MASK (3 << 16) +#define SLINK_CS_POLARITY1 (1 << 20) +#define SLINK_CK_SDA (1 << 21) +#define SLINK_CS_POLARITY2 (1 << 22) +#define SLINK_CS_POLARITY3 (1 << 23) +#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) +#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) +#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) +#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) +#define SLINK_IDLE_SCLK_MASK (3 << 24) +#define SLINK_M_S (1 << 28) +#define SLINK_WAIT (1 << 29) +#define SLINK_GO (1 << 30) +#define SLINK_ENB (1 << 31) + +#define SLINK_COMMAND2 0x004 +#define SLINK_LSBFE (1 << 0) +#define SLINK_SSOE (1 << 1) +#define SLINK_SPIE (1 << 4) +#define SLINK_BIDIROE (1 << 6) +#define SLINK_MODFEN (1 << 7) +#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) +#define SLINK_CS_ACTIVE_BETWEEN (1 << 17) +#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) +#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) +#define SLINK_FIFO_REFILLS_0 (0 << 22) +#define SLINK_FIFO_REFILLS_1 (1 << 22) +#define SLINK_FIFO_REFILLS_2 (2 << 22) +#define SLINK_FIFO_REFILLS_3 (3 << 22) +#define SLINK_FIFO_REFILLS_MASK (3 << 22) +#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) +#define SLINK_SPC0 (1 << 29) +#define SLINK_TXEN (1 << 30) +#define SLINK_RXEN (1 << 31) + +#define SLINK_STATUS 0x008 +#define SLINK_COUNT(val) (((val) >> 0) & 0x1f) +#define SLINK_WORD(val) (((val) >> 5) & 0x1f) +#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) +#define SLINK_MODF (1 << 16) +#define SLINK_RX_UNF (1 << 18) +#define SLINK_TX_OVF (1 << 19) +#define SLINK_TX_FULL (1 << 20) +#define SLINK_TX_EMPTY (1 << 21) +#define SLINK_RX_FULL (1 << 22) +#define SLINK_RX_EMPTY (1 << 23) +#define SLINK_TX_UNF (1 << 24) +#define SLINK_RX_OVF (1 << 25) +#define SLINK_TX_FLUSH (1 << 26) +#define SLINK_RX_FLUSH (1 << 27) +#define SLINK_SCLK (1 << 28) +#define SLINK_ERR (1 << 29) +#define SLINK_RDY (1 << 30) +#define SLINK_BSY (1 << 31) + +#define SLINK_MAS_DATA 0x010 +#define SLINK_SLAVE_DATA 0x014 + +#define SLINK_DMA_CTL 0x018 +#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) +#define SLINK_TX_TRIG_1 (0 << 16) +#define SLINK_TX_TRIG_4 (1 << 16) +#define SLINK_TX_TRIG_8 (2 << 16) +#define SLINK_TX_TRIG_16 (3 << 16) +#define SLINK_TX_TRIG_MASK (3 << 16) +#define SLINK_RX_TRIG_1 (0 << 18) +#define SLINK_RX_TRIG_4 (1 << 18) +#define SLINK_RX_TRIG_8 (2 << 18) +#define SLINK_RX_TRIG_16 (3 << 18) +#define SLINK_RX_TRIG_MASK (3 << 18) +#define SLINK_PACKED (1 << 20) +#define SLINK_PACK_SIZE_4 (0 << 21) +#define SLINK_PACK_SIZE_8 (1 << 21) +#define SLINK_PACK_SIZE_16 (2 << 21) +#define SLINK_PACK_SIZE_32 (3 << 21) +#define SLINK_PACK_SIZE_MASK (3 << 21) +#define SLINK_IE_TXC (1 << 26) +#define SLINK_IE_RXC (1 << 27) +#define SLINK_DMA_EN (1 << 31) + +#define SLINK_STATUS2 0x01c +#define 
SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) +#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) + +#define SLINK_TX_FIFO 0x100 +#define SLINK_RX_FIFO 0x180 + +static const unsigned long spi_tegra_req_sels[] = { + TEGRA_DMA_REQ_SEL_SL2B1, + TEGRA_DMA_REQ_SEL_SL2B2, + TEGRA_DMA_REQ_SEL_SL2B3, + TEGRA_DMA_REQ_SEL_SL2B4, +}; + +#define BB_LEN 32 + +struct spi_tegra_data { + struct spi_master *master; + struct platform_device *pdev; + spinlock_t lock; + + struct clk *clk; + void __iomem *base; + unsigned long phys; + + u32 cur_speed; + + struct list_head queue; + struct spi_transfer *cur; + unsigned cur_pos; + unsigned cur_len; + unsigned cur_bytes_per_word; + + /* The tegra spi controller has a bug which causes the first word + * in PIO transactions to be garbage. Since packed DMA transactions + * require transfers to be 4 byte aligned we need a bounce buffer + * for the generic case. + */ + struct tegra_dma_req rx_dma_req; + struct tegra_dma_channel *rx_dma; + u32 *rx_bb; + dma_addr_t rx_bb_phys; +}; + + +static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, + unsigned long reg) +{ + return readl(tspi->base + reg); +} + +static inline void spi_tegra_writel(struct spi_tegra_data *tspi, + unsigned long val, + unsigned long reg) +{ + writel(val, tspi->base + reg); +} + +static void spi_tegra_go(struct spi_tegra_data *tspi) +{ + unsigned long val; + + wmb(); + + val = spi_tegra_readl(tspi, SLINK_DMA_CTL); + val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; + val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); + + tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); + + val |= SLINK_DMA_EN; + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); +} + +static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = min(t->len - tspi->cur_pos, BB_LEN * + tspi->cur_bytes_per_word); + u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_WORD_SIZE(~0); + val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = 0; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + val |= tx_buf[i + j] << j * 8; + + spi_tegra_writel(tspi, val, SLINK_TX_FIFO); + } + + tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; + + return len; +} + +static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = tspi->cur_len; + u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + rx_buf[i + j] = (val >> (j * 8)) & 0xff; + } + + return len; +} + +static void spi_tegra_start_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + u32 speed; + u8 bits_per_word; + unsigned long val; + + speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; + bits_per_word = t->bits_per_word ? 
t->bits_per_word : + spi->bits_per_word; + + tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; + + if (speed != tspi->cur_speed) + clk_set_rate(tspi->clk, speed); + + if (tspi->cur_speed == 0) + clk_enable(tspi->clk); + + tspi->cur_speed = speed; + + val = spi_tegra_readl(tspi, SLINK_COMMAND2); + val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; + if (t->rx_buf) + val |= SLINK_RXEN; + if (t->tx_buf) + val |= SLINK_TXEN; + val |= SLINK_SS_EN_CS(spi->chip_select); + val |= SLINK_SPIE; + spi_tegra_writel(tspi, val, SLINK_COMMAND2); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_BIT_LENGTH(~0); + val |= SLINK_BIT_LENGTH(bits_per_word - 1); + + /* FIXME: should probably control CS manually so that we can be sure + * it does not go low between transfer and to support delay_usecs + * correctly. + */ + val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; + + if (spi->mode & SPI_CPHA) + val |= SLINK_CK_SDA; + + if (spi->mode & SPI_CPOL) + val |= SLINK_IDLE_SCLK_DRIVE_HIGH; + else + val |= SLINK_IDLE_SCLK_DRIVE_LOW; + + val |= SLINK_M_S; + + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); + + tspi->cur = t; + tspi->cur_pos = 0; + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); + + spi_tegra_go(tspi); +} + +static void spi_tegra_start_message(struct spi_device *spi, + struct spi_message *m) +{ + struct spi_transfer *t; + + m->actual_length = 0; + m->status = 0; + + t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); + spi_tegra_start_transfer(spi, t); +} + +static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) +{ + struct spi_tegra_data *tspi = req->dev; + unsigned long flags; + struct spi_message *m; + struct spi_device *spi; + int timeout = 0; + unsigned long val; + + /* the SPI controller may come back with both the BSY and RDY bits + * set. In this case we need to wait for the BSY bit to clear so + * that we are sure the DMA is finished. 1000 reads was empirically + * determined to be long enough. + */ + while (timeout++ < 1000) { + if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) + break; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_STATUS); + val |= SLINK_RDY; + spi_tegra_writel(tspi, val, SLINK_STATUS); + + m = list_first_entry(&tspi->queue, struct spi_message, queue); + + if (timeout >= 1000) + m->status = -EIO; + + spi = m->state; + + tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); + m->actual_length += tspi->cur_pos; + + if (tspi->cur_pos < tspi->cur->len) { + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); + spi_tegra_go(tspi); + } else if (!list_is_last(&tspi->cur->transfer_list, + &m->transfers)) { + tspi->cur = list_first_entry(&tspi->cur->transfer_list, + struct spi_transfer, + transfer_list); + spi_tegra_start_transfer(spi, tspi->cur); + } else { + list_del(&m->queue); + + m->complete(m->context); + + if (!list_empty(&tspi->queue)) { + m = list_first_entry(&tspi->queue, struct spi_message, + queue); + spi = m->state; + spi_tegra_start_message(spi, m); + } else { + clk_disable(tspi->clk); + tspi->cur_speed = 0; + } + } + + spin_unlock_irqrestore(&tspi->lock, flags); +} + +static int spi_tegra_setup(struct spi_device *spi) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + unsigned long cs_bit; + unsigned long val; + unsigned long flags; + + dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", + spi->bits_per_word, + spi->mode & SPI_CPOL ? 
"" : "~", + spi->mode & SPI_CPHA ? "" : "~", + spi->max_speed_hz); + + + switch (spi->chip_select) { + case 0: + cs_bit = SLINK_CS_POLARITY; + break; + + case 1: + cs_bit = SLINK_CS_POLARITY1; + break; + + case 2: + cs_bit = SLINK_CS_POLARITY2; + break; + + case 4: + cs_bit = SLINK_CS_POLARITY3; + break; + + default: + return -EINVAL; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + if (spi->mode & SPI_CS_HIGH) + val |= cs_bit; + else + val &= ~cs_bit; + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + int was_empty; + + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word < 0 || t->bits_per_word > 32) + return -EINVAL; + + if (t->len == 0) + return -EINVAL; + + if (!t->rx_buf && !t->tx_buf) + return -EINVAL; + } + + m->state = spi; + + spin_lock_irqsave(&tspi->lock, flags); + was_empty = list_empty(&tspi->queue); + list_add_tail(&m->queue, &tspi->queue); + + if (was_empty) + spi_tegra_start_message(spi, m); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int __init spi_tegra_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof *tspi); + if (master == NULL) { + dev_err(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->bus_num = pdev->id; + + master->setup = spi_tegra_setup; + master->transfer = spi_tegra_transfer; + master->num_chipselect = 4; + + dev_set_drvdata(&pdev->dev, master); + tspi = spi_master_get_devdata(master); + tspi->master = master; + tspi->pdev = pdev; + spin_lock_init(&tspi->lock); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENODEV; + goto err0; + } + + if (!request_mem_region(r->start, (r->end - r->start) + 1, + dev_name(&pdev->dev))) { + ret = -EBUSY; + goto err0; + } + + tspi->phys = r->start; + tspi->base = ioremap(r->start, r->end - r->start + 1); + if (!tspi->base) { + dev_err(&pdev->dev, "can't ioremap iomem\n"); + ret = -ENOMEM; + goto err1; + } + + tspi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR_OR_NULL(tspi->clk)) { + dev_err(&pdev->dev, "can not get clock\n"); + ret = PTR_ERR(tspi->clk); + goto err2; + } + + INIT_LIST_HEAD(&tspi->queue); + + tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); + if (!tspi->rx_dma) { + dev_err(&pdev->dev, "can not allocate rx dma channel\n"); + ret = -ENODEV; + goto err3; + } + + tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + &tspi->rx_bb_phys, GFP_KERNEL); + if (!tspi->rx_bb) { + dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); + ret = -ENOMEM; + goto err4; + } + + tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; + tspi->rx_dma_req.to_memory = 1; + tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; + tspi->rx_dma_req.dest_bus_width = 32; + tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; + tspi->rx_dma_req.source_bus_width = 32; + tspi->rx_dma_req.source_wrap = 4; + tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; + 
tspi->rx_dma_req.dev = tspi; + + ret = spi_register_master(master); + + if (ret < 0) + goto err5; + + return ret; + +err5: + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); +err4: + tegra_dma_free_channel(tspi->rx_dma); +err3: + clk_put(tspi->clk); +err2: + iounmap(tspi->base); +err1: + release_mem_region(r->start, (r->end - r->start) + 1); +err0: + spi_master_put(master); + return ret; +} + +static int __devexit spi_tegra_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + tspi = spi_master_get_devdata(master); + + tegra_dma_free_channel(tspi->rx_dma); + + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); + + clk_put(tspi->clk); + iounmap(tspi->base); + + spi_master_put(master); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(r->start, (r->end - r->start) + 1); + + return 0; +} + +MODULE_ALIAS("platform:spi_tegra"); + +static struct platform_driver spi_tegra_driver = { + .driver = { + .name = "spi_tegra", + .owner = THIS_MODULE, + }, + .remove = __devexit_p(spi_tegra_remove), +}; + +static int __init spi_tegra_init(void) +{ + return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe); +} +module_init(spi_tegra_init); + +static void __exit spi_tegra_exit(void) +{ + platform_driver_unregister(&spi_tegra_driver); +} +module_exit(spi_tegra_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c new file mode 100644 index 00000000000..58e187f45ec --- /dev/null +++ b/drivers/spi/spi_topcliff_pch.c @@ -0,0 +1,1303 @@ +/* + * SPI bus driver for the Topcliff PCH used by Intel SoCs + * + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/wait.h> +#include <linux/spi/spi.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/spi/spidev.h> +#include <linux/module.h> +#include <linux/device.h> + +/* Register offsets */ +#define PCH_SPCR 0x00 /* SPI control register */ +#define PCH_SPBRR 0x04 /* SPI baud rate register */ +#define PCH_SPSR 0x08 /* SPI status register */ +#define PCH_SPDWR 0x0C /* SPI write data register */ +#define PCH_SPDRR 0x10 /* SPI read data register */ +#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ +#define PCH_SRST 0x1C /* SPI reset register */ + +#define PCH_SPSR_TFD 0x000007C0 +#define PCH_SPSR_RFD 0x0000F800 + +#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) +#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) + +#define PCH_RX_THOLD 7 +#define PCH_RX_THOLD_MAX 15 + +#define PCH_MAX_BAUDRATE 5000000 +#define PCH_MAX_FIFO_DEPTH 16 + +#define STATUS_RUNNING 1 +#define STATUS_EXITING 2 +#define PCH_SLEEP_TIME 10 + +#define PCH_ADDRESS_SIZE 0x20 + +#define SSN_LOW 0x02U +#define SSN_NO_CONTROL 0x00U +#define PCH_MAX_CS 0xFF +#define PCI_DEVICE_ID_GE_SPI 0x8816 + +#define SPCR_SPE_BIT (1 << 0) +#define SPCR_MSTR_BIT (1 << 1) +#define SPCR_LSBF_BIT (1 << 4) +#define SPCR_CPHA_BIT (1 << 5) +#define SPCR_CPOL_BIT (1 << 6) +#define SPCR_TFIE_BIT (1 << 8) +#define SPCR_RFIE_BIT (1 << 9) +#define SPCR_FIE_BIT (1 << 10) +#define SPCR_ORIE_BIT (1 << 11) +#define SPCR_MDFIE_BIT (1 << 12) +#define SPCR_FICLR_BIT (1 << 24) +#define SPSR_TFI_BIT (1 << 0) +#define SPSR_RFI_BIT (1 << 1) +#define SPSR_FI_BIT (1 << 2) +#define SPBRR_SIZE_BIT (1 << 10) + +#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) + +#define SPCR_RFIC_FIELD 20 +#define SPCR_TFIC_FIELD 16 + +#define SPSR_INT_BITS 0x1F +#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) +#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) +#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) + +#define PCH_CLOCK_HZ 50000000 +#define PCH_MAX_SPBR 1023 + + +/** + * struct pch_spi_data - Holds the SPI channel specific details + * @io_remap_addr: The remapped PCI base address + * @master: Pointer to the SPI master structure + * @work: Reference to work queue handler + * @wk: Workqueue for carrying out execution of the + * requests + * @wait: Wait queue for waking up upon receiving an + * interrupt. 
+ * @transfer_complete: Status of SPI Transfer + * @bcurrent_msg_processing: Status flag for message processing + * @lock: Lock for protecting this structure + * @queue: SPI Message queue + * @status: Status of the SPI driver + * @bpw_len: Length of data to be transferred in bits per + * word + * @transfer_active: Flag showing active transfer + * @tx_index: Transmit data count; for bookkeeping during + * transfer + * @rx_index: Receive data count; for bookkeeping during + * transfer + * @pkt_tx_buff: Buffer for data to be transmitted + * @pkt_rx_buff: Buffer for received data + * @n_curnt_chip: The chip number that this SPI driver currently + * operates on + * @current_chip: Reference to the current chip that this SPI + * driver currently operates on + * @current_msg: The current message that this SPI driver is + * handling + * @cur_trans: The current transfer that this SPI driver is + * handling + * @board_dat: Reference to the SPI device data structure + */ +struct pch_spi_data { + void __iomem *io_remap_addr; + struct spi_master *master; + struct work_struct work; + struct workqueue_struct *wk; + wait_queue_head_t wait; + u8 transfer_complete; + u8 bcurrent_msg_processing; + spinlock_t lock; + struct list_head queue; + u8 status; + u32 bpw_len; + u8 transfer_active; + u32 tx_index; + u32 rx_index; + u16 *pkt_tx_buff; + u16 *pkt_rx_buff; + u8 n_curnt_chip; + struct spi_device *current_chip; + struct spi_message *current_msg; + struct spi_transfer *cur_trans; + struct pch_spi_board_data *board_dat; +}; + +/** + * struct pch_spi_board_data - Holds the SPI device specific details + * @pdev: Pointer to the PCI device + * @irq_reg_sts: Status of IRQ registration + * @pci_req_sts: Status of pci_request_regions + * @suspend_sts: Status of suspend + * @data: Pointer to SPI channel data structure + */ +struct pch_spi_board_data { + struct pci_dev *pdev; + u8 irq_reg_sts; + u8 pci_req_sts; + u8 suspend_sts; + struct pch_spi_data *data; +}; + +static struct pci_device_id pch_spi_pcidev_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, + {0,} +}; + +/** + * pch_spi_writereg() - Performs register writes + * @master: Pointer to struct spi_master. + * @idx: Register offset. + * @val: Value to be written to register. + */ +static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + iowrite32(val, (data->io_remap_addr + idx)); +} + +/** + * pch_spi_readreg() - Performs register reads + * @master: Pointer to struct spi_master. + * @idx: Register offset. + */ +static inline u32 pch_spi_readreg(struct spi_master *master, int idx) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + return ioread32(data->io_remap_addr + idx); +} + +static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, + u32 set, u32 clr) +{ + u32 tmp = pch_spi_readreg(master, idx); + tmp = (tmp & ~clr) | set; + pch_spi_writereg(master, idx, tmp); +} + +static void pch_spi_set_master_mode(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); +} + +/** + * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs + * @master: Pointer to struct spi_master.
+ */ +static void pch_spi_clear_fifo(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); + pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); +} + +static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, + void __iomem *io_remap_addr) +{ + u32 n_read, tx_index, rx_index, bpw_len; + u16 *pkt_rx_buffer, *pkt_tx_buff; + int read_cnt; + u32 reg_spcr_val; + void __iomem *spsr; + void __iomem *spdrr; + void __iomem *spdwr; + + spsr = io_remap_addr + PCH_SPSR; + iowrite32(reg_spsr_val, spsr); + + if (data->transfer_active) { + rx_index = data->rx_index; + tx_index = data->tx_index; + bpw_len = data->bpw_len; + pkt_rx_buffer = data->pkt_rx_buff; + pkt_tx_buff = data->pkt_tx_buff; + + spdrr = io_remap_addr + PCH_SPDRR; + spdwr = io_remap_addr + PCH_SPDWR; + + n_read = PCH_READABLE(reg_spsr_val); + + for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { + pkt_rx_buffer[rx_index++] = ioread32(spdrr); + if (tx_index < bpw_len) + iowrite32(pkt_tx_buff[tx_index++], spdwr); + } + + /* disable RFI if not needed */ + if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { + reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); + reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ + + /* reset rx threshold */ + reg_spcr_val &= MASK_RFIC_SPCR_BITS; + reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); + iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), + (io_remap_addr + PCH_SPCR)); + } + + /* update counts */ + data->tx_index = tx_index; + data->rx_index = rx_index; + + } + + /* if transfer complete interrupt */ + if (reg_spsr_val & SPSR_FI_BIT) { + /* disable FI & RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + SPCR_FIE_BIT | SPCR_TFIE_BIT); + + /* transfer is completed;inform pch_spi_process_messages */ + data->transfer_complete = true; + wake_up(&data->wait); + } +} + +/** + * pch_spi_handler() - Interrupt handler + * @irq: The interrupt number. + * @dev_id: Pointer to struct pch_spi_board_data. + */ +static irqreturn_t pch_spi_handler(int irq, void *dev_id) +{ + u32 reg_spsr_val; + struct pch_spi_data *data; + void __iomem *spsr; + void __iomem *io_remap_addr; + irqreturn_t ret = IRQ_NONE; + struct pch_spi_board_data *board_dat = dev_id; + + if (board_dat->suspend_sts) { + dev_dbg(&board_dat->pdev->dev, + "%s returning due to suspend\n", __func__); + return IRQ_NONE; + } + + data = board_dat->data; + io_remap_addr = data->io_remap_addr; + spsr = io_remap_addr + PCH_SPSR; + + reg_spsr_val = ioread32(spsr); + + /* Check if the interrupt is for SPI device */ + if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { + pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); + ret = IRQ_HANDLED; + } + + dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", + __func__, ret); + + return ret; +} + +/** + * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR + * @master: Pointer to struct spi_master. + * @speed_hz: Baud rate. + */ +static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) +{ + u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); + + /* if baud rate is less than we can support limit it */ + if (n_spbr > PCH_MAX_SPBR) + n_spbr = PCH_MAX_SPBR; + + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); +} + +/** + * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR + * @master: Pointer to struct spi_master. + * @bits_per_word: Bits per word for SPI transfer. 
+ */ +static void pch_spi_set_bits_per_word(struct spi_master *master, + u8 bits_per_word) +{ + if (bits_per_word == 8) + pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); + else + pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); +} + +/** + * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer + * @spi: Pointer to struct spi_device. + */ +static void pch_spi_setup_transfer(struct spi_device *spi) +{ + u32 flags = 0; + + dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", + __func__, pch_spi_readreg(spi->master, PCH_SPBRR), + spi->max_speed_hz); + pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); + + /* set bits per word */ + pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); + + if (!(spi->mode & SPI_LSB_FIRST)) + flags |= SPCR_LSBF_BIT; + if (spi->mode & SPI_CPOL) + flags |= SPCR_CPOL_BIT; + if (spi->mode & SPI_CPHA) + flags |= SPCR_CPHA_BIT; + pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, + (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); + + /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ + pch_spi_clear_fifo(spi->master); +} + +/** + * pch_spi_reset() - Clears SPI registers + * @master: Pointer to struct spi_master. + */ +static void pch_spi_reset(struct spi_master *master) +{ + /* write 1 to reset SPI */ + pch_spi_writereg(master, PCH_SRST, 0x1); + + /* clear reset */ + pch_spi_writereg(master, PCH_SRST, 0x0); +} + +static int pch_spi_setup(struct spi_device *pspi) +{ + /* check bits per word */ + if (pspi->bits_per_word == 0) { + pspi->bits_per_word = 8; + dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); + } + + if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { + dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); + return -EINVAL; + } + + /* Check baud rate setting */ + /* if baud rate of chip is greater than + max we can support,return error */ + if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) + pspi->max_speed_hz = PCH_MAX_BAUDRATE; + + dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, + (pspi->mode) & (SPI_CPOL | SPI_CPHA)); + + return 0; +} + +static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) +{ + + struct spi_transfer *transfer; + struct pch_spi_data *data = spi_master_get_devdata(pspi->master); + int retval; + unsigned long flags; + + /* validate spi message and baud rate */ + if (unlikely(list_empty(&pmsg->transfers) == 1)) { + dev_err(&pspi->dev, "%s list empty\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (unlikely(pspi->max_speed_hz == 0)) { + dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", + __func__, pspi->max_speed_hz); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Transfer List not empty. " + "Transfer Speed is set.\n", __func__); + + /* validate Tx/Rx buffers and Transfer length */ + list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { + if (!transfer->tx_buf && !transfer->rx_buf) { + dev_err(&pspi->dev, + "%s Tx and Rx buffer NULL\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (!transfer->len) { + dev_err(&pspi->dev, "%s Transfer length invalid\n", + __func__); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. 
Transfer length" + " valid\n", __func__); + + /* if baud rate hs been specified validate the same */ + if (transfer->speed_hz > PCH_MAX_BAUDRATE) + transfer->speed_hz = PCH_MAX_BAUDRATE; + + /* if bits per word has been specified validate the same */ + if (transfer->bits_per_word) { + if ((transfer->bits_per_word != 8) + && (transfer->bits_per_word != 16)) { + retval = -EINVAL; + dev_err(&pspi->dev, + "%s Invalid bits per word\n", __func__); + goto err_out; + } + } + } + + spin_lock_irqsave(&data->lock, flags); + + /* We won't process any messages if we have been asked to terminate */ + if (data->status == STATUS_EXITING) { + dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); + retval = -ESHUTDOWN; + goto err_return_spinlock; + } + + /* If suspended ,return -EINVAL */ + if (data->board_dat->suspend_sts) { + dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); + retval = -EINVAL; + goto err_return_spinlock; + } + + /* set status of message */ + pmsg->actual_length = 0; + dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); + + pmsg->status = -EINPROGRESS; + + /* add message to queue */ + list_add_tail(&pmsg->queue, &data->queue); + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); + + /* schedule work queue to run */ + queue_work(data->wk, &data->work); + dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); + + retval = 0; + +err_return_spinlock: + spin_unlock_irqrestore(&data->lock, flags); +err_out: + dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); + return retval; +} + +static inline void pch_spi_select_chip(struct pch_spi_data *data, + struct spi_device *pspi) +{ + if (data->current_chip != NULL) { + if (pspi->chip_select != data->n_curnt_chip) { + dev_dbg(&pspi->dev, "%s : different slave\n", __func__); + data->current_chip = NULL; + } + } + + data->current_chip = pspi; + + data->n_curnt_chip = data->current_chip->chip_select; + + dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); + pch_spi_setup_transfer(pspi); +} + +static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, + struct spi_message **ppmsg) +{ + int size; + u32 n_writes; + int j; + struct spi_message *pmsg; + const u8 *tx_buf; + const u16 *tx_sbuf; + + pmsg = *ppmsg; + + /* set baud rate if needed */ + if (data->cur_trans->speed_hz) { + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + } + + /* set bits per word if needed */ + if (data->cur_trans->bits_per_word && + (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + pch_spi_set_bits_per_word(data->master, + data->cur_trans->bits_per_word); + *bpw = data->cur_trans->bits_per_word; + } else { + *bpw = data->current_msg->spi->bits_per_word; + } + + /* reset Tx/Rx index */ + data->tx_index = 0; + data->rx_index = 0; + + data->bpw_len = data->cur_trans->len / (*bpw / 8); + + /* find alloc size */ + size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); + + /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); + if (data->pkt_tx_buff != NULL) { + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); + if (!data->pkt_rx_buff) + kfree(data->pkt_tx_buff); + } + + if (!data->pkt_rx_buff) { + /* flush queue and set status of all transfers to -ENOMEM */ + dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); + list_for_each_entry(pmsg, data->queue.next, 
queue) { + pmsg->status = -ENOMEM; + + if (pmsg->complete != 0) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + return; + } + + /* copy Tx Data */ + if (data->cur_trans->tx_buf != NULL) { + if (*bpw == 8) { + tx_buf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_buf++; + } else { + tx_sbuf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_sbuf++; + } + } + + /* if len greater than PCH_MAX_FIFO_DEPTH, write 16, else len bytes */ + n_writes = data->bpw_len; + if (n_writes > PCH_MAX_FIFO_DEPTH) + n_writes = PCH_MAX_FIFO_DEPTH; + + dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " + "0x2 to SSNXCR\n", __func__); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + + for (j = 0; j < n_writes; j++) + pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); + + /* update tx_index */ + data->tx_index = j; + + /* reset transfer complete flag */ + data->transfer_complete = false; + data->transfer_active = true; +} + + +static void pch_spi_nomore_transfer(struct pch_spi_data *data, + struct spi_message *pmsg) +{ + dev_dbg(&data->master->dev, "%s called\n", __func__); + /* Invoke complete callback + * [To the spi core..indicating end of transfer] */ + data->current_msg->status = 0; + + if (data->current_msg->complete != 0) { + dev_dbg(&data->master->dev, + "%s:Invoking callback of SPI core\n", __func__); + data->current_msg->complete(data->current_msg->context); + } + + /* update status in global variable */ + data->bcurrent_msg_processing = false; + + dev_dbg(&data->master->dev, + "%s:data->bcurrent_msg_processing = false\n", __func__); + + data->current_msg = NULL; + data->cur_trans = NULL; + + /* check if we have items in list and not suspending + * return 1 if list empty */ + if ((list_empty(&data->queue) == 0) && + (!data->board_dat->suspend_sts) && + (data->status != STATUS_EXITING)) { + /* We have some more work to do (either there are more transfer + * requests in the current message or there are + * more messages) + */ + dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); + queue_work(data->wk, &data->work); + } else if (data->board_dat->suspend_sts || + data->status == STATUS_EXITING) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated, flushing queue\n", + __func__); + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + } +} + +static void pch_spi_set_ir(struct pch_spi_data *data) +{ + /* enable interrupts */ + if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { + /* set receive threshold to PCH_RX_THOLD */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD << SPCR_TFIC_FIELD, + ~MASK_TFIC_SPCR_BITS); + /* enable FI and RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0); + } else { + /* set receive threshold to maximum */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, + ~MASK_TFIC_SPCR_BITS); + /* enable FI interrupt */ + pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); + } + + dev_dbg(&data->master->dev, + "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); + + /* SPI set enable */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); + + /* Wait until the transfer completes; go to sleep after + initiating the transfer.
*/ + dev_dbg(&data->master->dev, + "%s:waiting for transfer to get over\n", __func__); + + wait_event_interruptible(data->wait, data->transfer_complete); + + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + dev_dbg(&data->master->dev, + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); + + data->transfer_active = false; + dev_dbg(&data->master->dev, + "%s set data->transfer_active = false\n", __func__); + + /* clear all interrupts */ + pch_spi_writereg(data->master, PCH_SPSR, + pch_spi_readreg(data->master, PCH_SPSR)); + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); +} + +static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) +{ + int j; + u8 *rx_buf; + u16 *rx_sbuf; + + /* copy Rx Data */ + if (!data->cur_trans->rx_buf) + return; + + if (bpw == 8) { + rx_buf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; + } else { + rx_sbuf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_sbuf++ = data->pkt_rx_buff[j]; + } +} + + +static void pch_spi_process_messages(struct work_struct *pwork) +{ + struct spi_message *pmsg; + struct pch_spi_data *data; + int bpw; + + data = container_of(pwork, struct pch_spi_data, work); + dev_dbg(&data->master->dev, "%s data initialized\n", __func__); + + spin_lock(&data->lock); + + /* check if suspend has been initiated;if yes flush queue */ + if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated,flushing queue\n", + __func__); + + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete != 0) { + spin_unlock(&data->lock); + pmsg->complete(pmsg->context); + spin_lock(&data->lock); + } + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + + spin_unlock(&data->lock); + return; + } + + data->bcurrent_msg_processing = true; + dev_dbg(&data->master->dev, + "%s Set data->bcurrent_msg_processing= true\n", __func__); + + /* Get the message from the queue and delete it from there. */ + data->current_msg = list_entry(data->queue.next, struct spi_message, + queue); + + list_del_init(&data->current_msg->queue); + + data->current_msg->status = 0; + + pch_spi_select_chip(data, data->current_msg->spi); + + spin_unlock(&data->lock); + + do { + /* If we are already processing a message get the next + transfer structure from the message otherwise retrieve + the 1st transfer request from the message. */ + spin_lock(&data->lock); + + if (data->cur_trans == NULL) { + data->cur_trans = + list_entry(data->current_msg->transfers. 
+ next, struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting 1st transfer message\n", __func__); + } else { + data->cur_trans = + list_entry(data->cur_trans->transfer_list.next, + struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting next transfer message\n", + __func__); + } + + spin_unlock(&data->lock); + + pch_spi_set_tx(data, &bpw, &pmsg); + + /* Control interrupt */ + pch_spi_set_ir(data); + + /* Disable SPI transfer */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, + SPCR_SPE_BIT); + + /* clear FIFO */ + pch_spi_clear_fifo(data->master); + + /* copy Rx Data */ + pch_spi_copy_rx_data(data, bpw); + + /* free memory */ + kfree(data->pkt_rx_buff); + data->pkt_rx_buff = NULL; + + kfree(data->pkt_tx_buff); + data->pkt_tx_buff = NULL; + + /* increment message count */ + data->current_msg->actual_length += data->cur_trans->len; + + dev_dbg(&data->master->dev, + "%s:data->current_msg->actual_length=%d\n", + __func__, data->current_msg->actual_length); + + /* check for delay */ + if (data->cur_trans->delay_usecs) { + dev_dbg(&data->master->dev, "%s:" + "delay in usec=%d\n", __func__, + data->cur_trans->delay_usecs); + udelay(data->cur_trans->delay_usecs); + } + + spin_lock(&data->lock); + + /* No more transfer in this message. */ + if ((data->cur_trans->transfer_list.next) == + &(data->current_msg->transfers)) { + pch_spi_nomore_transfer(data, pmsg); + } + + spin_unlock(&data->lock); + + } while (data->cur_trans != NULL); +} + +static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) +{ + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* free workqueue */ + if (board_dat->data->wk != NULL) { + destroy_workqueue(board_dat->data->wk); + board_dat->data->wk = NULL; + dev_dbg(&board_dat->pdev->dev, + "%s destroy_workqueue invoked successfully\n", + __func__); + } + + /* disable interrupts & free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + + /* free IRQ */ + free_irq(board_dat->pdev->irq, board_dat); + + dev_dbg(&board_dat->pdev->dev, + "%s free_irq invoked successfully\n", __func__); + + board_dat->irq_reg_sts = false; + } + + /* unmap PCI base address */ + if (board_dat->data->io_remap_addr != 0) { + pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); + + board_dat->data->io_remap_addr = 0; + + dev_dbg(&board_dat->pdev->dev, + "%s pci_iounmap invoked successfully\n", __func__); + } + + /* release PCI region */ + if (board_dat->pci_req_sts) { + pci_release_regions(board_dat->pdev); + dev_dbg(&board_dat->pdev->dev, + "%s pci_release_regions invoked successfully\n", + __func__); + board_dat->pci_req_sts = false; + } +} + +static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) +{ + void __iomem *io_remap_addr; + int retval; + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* create workqueue */ + board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); + if (!board_dat->data->wk) { + dev_err(&board_dat->pdev->dev, + "%s create_singlethread_workqueue failed\n", __func__); + retval = -EBUSY; + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, + "%s create_singlethread_workqueue success\n", __func__); + + retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_region failed\n", __func__); + goto err_return; + } + + board_dat->pci_req_sts = true; + + io_remap_addr =
pci_iomap(board_dat->pdev, 1, 0);
+	if (io_remap_addr == 0) {
+		dev_err(&board_dat->pdev->dev,
+			"%s pci_iomap failed\n", __func__);
+		retval = -ENOMEM;
+		goto err_return;
+	}
+
+	/* calculate base address for all channels */
+	board_dat->data->io_remap_addr = io_remap_addr;
+
+	/* reset PCH SPI h/w */
+	pch_spi_reset(board_dat->data->master);
+	dev_dbg(&board_dat->pdev->dev,
+		"%s pch_spi_reset invoked successfully\n", __func__);
+
+	/* register IRQ */
+	retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+			     IRQF_SHARED, KBUILD_MODNAME, board_dat);
+	if (retval != 0) {
+		dev_err(&board_dat->pdev->dev,
+			"%s request_irq failed\n", __func__);
+		goto err_return;
+	}
+
+	dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
+		__func__, retval);
+
+	board_dat->irq_reg_sts = true;
+	dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
+
+err_return:
+	if (retval != 0) {
+		dev_err(&board_dat->pdev->dev,
+			"%s FAIL:invoking pch_spi_free_resources\n", __func__);
+		pch_spi_free_resources(board_dat);
+	}
+
+	dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
+
+	return retval;
+}
+
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+
+	struct spi_master *master;
+
+	struct pch_spi_board_data *board_dat;
+	int retval;
+
+	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+	/* allocate memory for private data */
+	board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+	if (board_dat == NULL) {
+		dev_err(&pdev->dev,
+			" %s memory allocation for private data failed\n",
+			__func__);
+		retval = -ENOMEM;
+		goto err_kmalloc;
+	}
+
+	dev_dbg(&pdev->dev,
+		"%s memory allocation for private data success\n", __func__);
+
+	/* enable PCI device */
+	retval = pci_enable_device(pdev);
+	if (retval != 0) {
+		dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
+
+		goto err_pci_en_device;
+	}
+
+	dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
+		__func__, retval);
+
+	board_dat->pdev = pdev;
+
+	/* allocate memory for SPI master */
+	master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
+	if (master == NULL) {
+		retval = -ENOMEM;
+		dev_err(&pdev->dev, "%s Fail.\n", __func__);
+		goto err_spi_alloc_master;
+	}
+
+	dev_dbg(&pdev->dev,
+		"%s spi_alloc_master returned non NULL\n", __func__);
+
+	/* initialize members of SPI master */
+	master->bus_num = -1;
+	master->num_chipselect = PCH_MAX_CS;
+	master->setup = pch_spi_setup;
+	master->transfer = pch_spi_transfer;
+	dev_dbg(&pdev->dev,
+		"%s transfer member of SPI master initialized\n", __func__);
+
+	board_dat->data = spi_master_get_devdata(master);
+
+	board_dat->data->master = master;
+	board_dat->data->n_curnt_chip = 255;
+	board_dat->data->board_dat = board_dat;
+	board_dat->data->status = STATUS_RUNNING;
+
+	INIT_LIST_HEAD(&board_dat->data->queue);
+	spin_lock_init(&board_dat->data->lock);
+	INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
+	init_waitqueue_head(&board_dat->data->wait);
+
+	/* allocate resources for PCH SPI */
+	retval = pch_spi_get_resources(board_dat);
+	if (retval) {
+		dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
+		goto err_spi_get_resources;
+	}
+
+	dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
+		__func__, retval);
+
+	/* save private data in dev */
+	pci_set_drvdata(pdev, board_dat);
+	dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
+
+	/* set master mode */
+	pch_spi_set_master_mode(master);
+	dev_dbg(&pdev->dev,
+		"%s invoked pch_spi_set_master_mode\n", __func__);
+
+	/* Register the controller with the SPI core. */
+	retval = spi_register_master(master);
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s spi_register_master FAILED\n", __func__);
+		goto err_spi_reg_master;
+	}
+
+	dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
+		__func__, retval);
+
+
+	return 0;
+
+err_spi_reg_master:
+	spi_unregister_master(master);
+err_spi_get_resources:
+err_spi_alloc_master:
+	spi_master_put(master);
+	pci_disable_device(pdev);
+err_pci_en_device:
+	kfree(board_dat);
+err_kmalloc:
+	return retval;
+}
+
+static void pch_spi_remove(struct pci_dev *pdev)
+{
+	struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+	int count;
+
+	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+	if (!board_dat) {
+		dev_err(&pdev->dev,
+			"%s pci_get_drvdata returned NULL\n", __func__);
+		return;
+	}
+
+	/* check for any pending messages; no action is taken if the queue
+	 * is still full; but at least we tried. Unload anyway */
+	count = 500;
+	spin_lock(&board_dat->data->lock);
+	board_dat->data->status = STATUS_EXITING;
+	while ((list_empty(&board_dat->data->queue) == 0) && --count) {
+		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
+			__func__);
+		spin_unlock(&board_dat->data->lock);
+		msleep(PCH_SLEEP_TIME);
+		spin_lock(&board_dat->data->lock);
+	}
+	spin_unlock(&board_dat->data->lock);
+
+	/* Free resources allocated for PCH SPI */
+	pch_spi_free_resources(board_dat);
+
+	spi_unregister_master(board_dat->data->master);
+
+	/* free memory for private data */
+	kfree(board_dat);
+
+	pci_set_drvdata(pdev, NULL);
+
+	/* disable PCI device */
+	pci_disable_device(pdev);
+
+	dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
+}
+
+#ifdef CONFIG_PM
+static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	u8 count;
+	int retval;
+
+	struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+	if (!board_dat) {
+		dev_err(&pdev->dev,
+			"%s pci_get_drvdata returned NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	retval = 0;
+	board_dat->suspend_sts = true;
+
+	/* check if the current message is processed:
+	   only after that is done will the transfer be suspended */
+	count = 255;
+	while ((--count) > 0) {
+		if (!(board_dat->data->bcurrent_msg_processing)) {
+			dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
+				"msg_processing = false\n", __func__);
+			break;
+		} else {
+			dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
+				"processing = true\n", __func__);
+		}
+		msleep(PCH_SLEEP_TIME);
+	}
+
+	/* Free IRQ */
+	if (board_dat->irq_reg_sts) {
+		/* disable all interrupts */
+		pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
+				   PCH_ALL);
+		pch_spi_reset(board_dat->data->master);
+
+		free_irq(board_dat->pdev->irq, board_dat);
+
+		board_dat->irq_reg_sts = false;
+		dev_dbg(&pdev->dev,
+			"%s free_irq invoked successfully.\n", __func__);
+	}
+
+	/* save config space */
+	retval = pci_save_state(pdev);
+
+	if (retval == 0) {
+		dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
+			__func__, retval);
+		/* disable PM notifications */
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		dev_dbg(&pdev->dev,
+			"%s pci_enable_wake invoked successfully\n", __func__);
+		/* disable PCI device */
+		pci_disable_device(pdev);
+		dev_dbg(&pdev->dev,
+			"%s pci_disable_device invoked successfully\n",
+			__func__);
+		/* move device to D3hot state */
+		pci_set_power_state(pdev, PCI_D3hot);
+		dev_dbg(&pdev->dev,
+			"%s pci_set_power_state invoked successfully\n",
+			__func__);
+	} else {
+		dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
+	}
+
+	dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
+
+	return retval;
+}
+
+static int pch_spi_resume(struct pci_dev *pdev)
+{
+	int retval;
+
+	struct pch_spi_board_data *board = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+	if (!board) {
+		dev_err(&pdev->dev,
+			"%s pci_get_drvdata returned NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	/* move device to D0 power state */
+	pci_set_power_state(pdev, PCI_D0);
+
+	/* restore state */
+	pci_restore_state(pdev);
+
+	retval = pci_enable_device(pdev);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+			"%s pci_enable_device failed\n", __func__);
+	} else {
+		/* disable PM notifications */
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+
+		/* register IRQ handler */
+		if (!board->irq_reg_sts) {
+			/* register IRQ */
+			retval = request_irq(board->pdev->irq, pch_spi_handler,
+					     IRQF_SHARED, KBUILD_MODNAME,
+					     board);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+					"%s request_irq failed\n", __func__);
+				return retval;
+			}
+			board->irq_reg_sts = true;
+
+			/* reset PCH SPI h/w */
+			pch_spi_reset(board->data->master);
+			pch_spi_set_master_mode(board->data->master);
+
+			/* set suspend status to false */
+			board->suspend_sts = false;
+
+		}
+	}
+
+	dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
+
+	return retval;
+}
+#else
+#define pch_spi_suspend NULL
+#define pch_spi_resume NULL
+
+#endif
+
+static struct pci_driver pch_spi_pcidev = {
+	.name = "pch_spi",
+	.id_table = pch_spi_pcidev_id,
+	.probe = pch_spi_probe,
+	.remove = pch_spi_remove,
+	.suspend = pch_spi_suspend,
+	.resume = pch_spi_resume,
+};
+
+static int __init pch_spi_init(void)
+{
+	return pci_register_driver(&pch_spi_pcidev);
+}
+module_init(pch_spi_init);
+
+static void __exit pch_spi_exit(void)
+{
+	pci_unregister_driver(&pch_spi_pcidev);
+}
+module_exit(pch_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index ea1bec3c9a1..4e6245e6799 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -545,6 +545,7 @@ static const struct file_operations spidev_fops = {
 	.unlocked_ioctl = spidev_ioctl,
 	.open =		spidev_open,
 	.release =	spidev_release,
+	.llseek =	no_llseek,
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index bf9540f5fb9..a3938958147 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -11,6 +11,7 @@
 
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/tle62x0.h>
 
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 9f386379c16..80f2db5bcfd 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -93,6 +93,26 @@ struct xilinx_spi {
 	void (*rx_fn) (struct xilinx_spi *);
 };
 
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+	iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
 static void xspi_tx8(struct xilinx_spi *xspi)
 {
 	xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
@@ -370,15 +390,18 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
 	master->bus_num = bus_num;
 	master->num_chipselect = pdata->num_chipselect;
+#ifdef CONFIG_OF
+	master->dev.of_node = dev->of_node;
+#endif
 
 	xspi->mem = *mem;
 	xspi->irq = irq;
 	if (pdata->little_endian) {
-		xspi->read_fn = ioread32;
-		xspi->write_fn = iowrite32;
+		xspi->read_fn = xspi_read32;
+		xspi->write_fn = xspi_write32;
 	} else {
-		xspi->read_fn = ioread32be;
-		xspi->write_fn = iowrite32be;
+		xspi->read_fn = xspi_read32_be;
+		xspi->write_fn = xspi_write32_be;
 	}
 	xspi->bits_per_word = pdata->bits_per_word;
 	if (xspi->bits_per_word == 8) {
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 71dc3adc049..b66c2dbf20a 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -27,7 +27,9 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/of_spi.h>
@@ -36,7 +38,7 @@
 
 #include "xilinx_spi.h"
 
-static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
+static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev,
 	const struct of_device_id *match)
 {
 	struct spi_master *master;
@@ -47,13 +49,13 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
 	const u32 *prop;
 	int len;
 
-	rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+	rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
 	if (rc) {
 		dev_warn(&ofdev->dev, "invalid address\n");
 		return rc;
 	}
 
-	rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
 	if (rc == NO_IRQ) {
 		dev_warn(&ofdev->dev, "no IRQ found\n");
 		return -ENODEV;
@@ -66,7 +68,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
 		return -ENOMEM;
 
 	/* number of slave select bits is required */
-	prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+	prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len);
 	if (!prop || len < sizeof(*prop)) {
 		dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
 		return -EINVAL;
@@ -79,13 +81,10 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
 
 	dev_set_drvdata(&ofdev->dev, master);
 
-	/* Add any subnodes on the SPI bus */
-	of_register_spi_devices(master, ofdev->node);
-
 	return 0;
 }
 
-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+static int __devexit xilinx_spi_remove(struct platform_device *ofdev)
 {
 	xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
 	dev_set_drvdata(&ofdev->dev, 0);
@@ -94,12 +93,12 @@ static int __devexit xilinx_spi_remove(struct of_device *ofdev)
 	return 0;
 }
 
-static int __exit xilinx_spi_of_remove(struct of_device *op)
+static int __exit xilinx_spi_of_remove(struct platform_device *op)
 {
 	return xilinx_spi_remove(op);
 }
 
-static struct of_device_id xilinx_spi_of_match[] = {
+static const struct of_device_id xilinx_spi_of_match[] = {
 	{ .compatible = "xlnx,xps-spi-2.00.a", },
 	{ .compatible = "xlnx,xps-spi-2.00.b", },
 	{}
@@ -108,12 +107,12 @@ static struct of_device_id xilinx_spi_of_match[] = {
 MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
 
 static struct of_platform_driver xilinx_spi_of_driver = {
-	.match_table = xilinx_spi_of_match,
 	.probe = xilinx_spi_of_probe,
 	.remove = __exit_p(xilinx_spi_of_remove),
 	.driver = {
 		.name = "xilinx-xps-spi",
 		.owner = THIS_MODULE,
+		.of_match_table = xilinx_spi_of_match,
 	},
 };
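
Not part of the patches above: a minimal board-code sketch of how a slave device can be declared on a bus provided by one of these masters, using the standard spi_register_board_info() interface. The device table name, bus number, chip select, clock rate, and the choice of the generic "spidev" protocol driver are all illustrative assumptions, not taken from the patch (pch_spi, for instance, requests a dynamic bus number via bus_num = -1, so the real number would have to be looked up).

/* example_spi_board.c - hypothetical board setup, not from the patch */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",		/* bind the generic spidev driver */
		.max_speed_hz	= 1 * 1000 * 1000,	/* 1 MHz, illustrative */
		.bus_num	= 0,			/* assumed bus number */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_spi_board_init(void)
{
	/* Register the table with the SPI core; the device is instantiated
	 * once a master with a matching bus_num is registered. */
	return spi_register_board_info(example_spi_devices,
				       ARRAY_SIZE(example_spi_devices));
}
arch_initcall(example_spi_board_init);

For OF-based platforms such as the Xilinx driver, the same result is normally reached without board code: child nodes of the SPI controller node are turned into spi_device instances by the SPI core when the master is registered.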
