Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig | 228
-rw-r--r--  drivers/spi/Makefile | 24
-rw-r--r--  drivers/spi/spi-adi-v3.c | 986
-rw-r--r--  drivers/spi/spi-altera.c | 66
-rw-r--r--  drivers/spi/spi-ath79.c | 135
-rw-r--r--  drivers/spi/spi-atmel.c | 1243
-rw-r--r--  drivers/spi/spi-au1550.c | 68
-rw-r--r--  drivers/spi/spi-bcm2835.c | 408
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 474
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 347
-rw-r--r--  drivers/spi/spi-bfin-sport.c | 56
-rw-r--r--  drivers/spi/spi-bfin5xx.c | 92
-rw-r--r--  drivers/spi/spi-bitbang-txrx.h | 2
-rw-r--r--  drivers/spi/spi-bitbang.c | 294
-rw-r--r--  drivers/spi/spi-butterfly.c | 18
-rw-r--r--  drivers/spi/spi-cadence.c | 673
-rw-r--r--  drivers/spi/spi-clps711x.c | 247
-rw-r--r--  drivers/spi/spi-coldfire-qspi.c | 212
-rw-r--r--  drivers/spi/spi-davinci.c | 197
-rw-r--r--  drivers/spi/spi-dw-mid.c | 4
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 109
-rw-r--r--  drivers/spi/spi-dw-pci.c | 56
-rw-r--r--  drivers/spi/spi-dw.c | 268
-rw-r--r--  drivers/spi/spi-dw.h | 29
-rw-r--r--  drivers/spi/spi-efm32.c | 500
-rw-r--r--  drivers/spi/spi-ep93xx.c | 390
-rw-r--r--  drivers/spi/spi-falcon.c | 23
-rw-r--r--  drivers/spi/spi-fsl-cpm.c | 388
-rw-r--r--  drivers/spi/spi-fsl-cpm.h | 43
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 574
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 146
-rw-r--r--  drivers/spi/spi-fsl-lib.c | 34
-rw-r--r--  drivers/spi/spi-fsl-lib.h | 16
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 663
-rw-r--r--  drivers/spi/spi-fsl-spi.h | 72
-rw-r--r--  drivers/spi/spi-gpio.c | 72
-rw-r--r--  drivers/spi/spi-imx.c | 169
-rw-r--r--  drivers/spi/spi-lm70llp.c | 2
-rw-r--r--  drivers/spi/spi-mpc512x-psc.c | 488
-rw-r--r--  drivers/spi/spi-mpc52xx-psc.c | 20
-rw-r--r--  drivers/spi/spi-mpc52xx.c | 35
-rw-r--r--  drivers/spi/spi-mxs.c | 327
-rw-r--r--  drivers/spi/spi-nuc900.c | 113
-rw-r--r--  drivers/spi/spi-oc-tiny.c | 115
-rw-r--r--  drivers/spi/spi-octeon.c | 137
-rw-r--r--  drivers/spi/spi-omap-100k.c | 368
-rw-r--r--  drivers/spi/spi-omap-uwire.c | 48
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 552
-rw-r--r--  drivers/spi/spi-orion.c | 165
-rw-r--r--  drivers/spi/spi-pl022.c | 223
-rw-r--r--  drivers/spi/spi-ppc4xx.c | 35
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 376
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c | 203
-rw-r--r--  drivers/spi/spi-pxa2xx-pxadma.c | 489
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 1178
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 221
-rw-r--r--  drivers/spi/spi-qup.c | 761
-rw-r--r--  drivers/spi/spi-rspi.c | 1358
-rw-r--r--  drivers/spi/spi-s3c24xx.c | 126
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 1014
-rw-r--r--  drivers/spi/spi-sc18is602.c | 51
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 139
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 463
-rw-r--r--  drivers/spi/spi-sh-sci.c | 17
-rw-r--r--  drivers/spi/spi-sh.c | 29
-rw-r--r--  drivers/spi/spi-sirf.c | 497
-rw-r--r--  drivers/spi/spi-stmp.c | 664
-rw-r--r--  drivers/spi/spi-sun4i.c | 477
-rw-r--r--  drivers/spi/spi-sun6i.c | 483
-rw-r--r--  drivers/spi/spi-tegra114.c | 1229
-rw-r--r--  drivers/spi/spi-tegra20-sflash.c | 622
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 1239
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 581
-rw-r--r--  drivers/spi/spi-ti-ssp.c | 392
-rw-r--r--  drivers/spi/spi-tle62x0.c | 15
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 144
-rw-r--r--  drivers/spi/spi-txx9.c | 71
-rw-r--r--  drivers/spi/spi-xcomm.c | 39
-rw-r--r--  drivers/spi/spi-xilinx.c | 317
-rw-r--r--  drivers/spi/spi-xtensa-xtfpga.c | 170
-rw-r--r--  drivers/spi/spi.c | 918
-rw-r--r--  drivers/spi/spidev.c | 48
82 files changed, 18054 insertions, 8231 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae359cab..213b5cbb9dc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -61,7 +61,7 @@ config SPI_ALTERA
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
- depends on ATH79 && GENERIC_GPIO
+ depends on ATH79 && GPIOLIB
select SPI_BITBANG
help
This enables support for the SPI controller present on the
@@ -69,17 +69,35 @@ config SPI_ATH79
config SPI_ATMEL
tristate "Atmel SPI Controller"
- depends on (ARCH_AT91 || AVR32)
+ depends on (ARCH_AT91 || AVR32 || COMPILE_TEST)
help
This selects a driver for the Atmel SPI Controller, present on
many AT32 (AVR32) and AT91 (ARM) chips.
+config SPI_BCM2835
+ tristate "BCM2835 SPI controller"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ help
+ This selects a driver for the Broadcom BCM2835 SPI master.
+
+ The BCM2835 contains two types of SPI master controller; the
+ "universal SPI master", and the regular SPI controller. This driver
+ is for the regular SPI controller. Slave mode operation is not
+ supported.
+
config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
- depends on BLACKFIN
+ depends on BLACKFIN && !BF60x
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_ADI_V3
+ tristate "SPI controller v3 for ADI"
+ depends on BF60x
+ help
+ This is the SPI controller v3 master driver
+ found on Blackfin 60x processor.
+
config SPI_BFIN_SPORT
tristate "SPI bus via Blackfin SPORT"
depends on BLACKFIN
@@ -88,7 +106,7 @@ config SPI_BFIN_SPORT
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
- depends on MIPS_ALCHEMY && EXPERIMENTAL
+ depends on MIPS_ALCHEMY
select SPI_BITBANG
help
If you say yes to this option, support will be included for the
@@ -100,6 +118,13 @@ config SPI_BCM63XX
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+config SPI_BCM63XX_HSSPI
+ tristate "Broadcom BCM63XX HS SPI controller driver"
+ depends on BCM63XX || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCM63XX SoCs.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -123,6 +148,20 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_CADENCE
+ tristate "Cadence SPI controller"
+ depends on ARM
+ help
+ This selects the Cadence SPI controller master driver
+ used by Xilinx Zynq.
+
+config SPI_CLPS711X
+ tristate "CLPS711X host SPI controller"
+ depends on ARCH_CLPS711X || COMPILE_TEST
+ help
+ This enables dedicated general purpose SPI/Microwire1-compatible
+ master mode interface (SSI1) for CLPS711X-based CPUs.
+
config SPI_COLDFIRE_QSPI
tristate "Freescale Coldfire QSPI controller"
depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
@@ -132,15 +171,21 @@ config SPI_COLDFIRE_QSPI
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
select SPI_BITBANG
- select TI_EDMA
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+config SPI_EFM32
+ tristate "EFM32 SPI controller"
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select SPI_BITBANG
+ help
+ Driver for the spi controller found on Energy Micro's EFM32 SoCs.
+
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
help
This enables using the Cirrus EP93xx SPI controller in master
mode.
@@ -156,7 +201,7 @@ config SPI_FALCON
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
@@ -172,16 +217,15 @@ config SPI_GPIO
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
- default m if IMX_HAVE_PLATFORM_SPI_IMX
help
This enables using the Freescale i.MX SPI controllers in master
mode.
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
- depends on PARPORT && EXPERIMENTAL
+ depends on PARPORT
select SPI_BITBANG
help
This driver supports the NS LM70 LLP Evaluation Board,
@@ -197,7 +241,7 @@ config SPI_MPC52xx
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
- depends on PPC_MPC52xx && EXPERIMENTAL
+ depends on PPC_MPC52xx
help
This enables using the Freescale MPC52xx Programmable Serial
Controller in master SPI mode.
@@ -211,16 +255,32 @@ config SPI_MPC512x_PSC
config SPI_FSL_LIB
tristate
+ depends on OF
+
+config SPI_FSL_CPM
+ tristate
depends on FSL_SOC
config SPI_FSL_SPI
- bool "Freescale SPI controller"
- depends on FSL_SOC
+ bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+ depends on OF
select SPI_FSL_LIB
+ select SPI_FSL_CPM if FSL_SOC
help
This enables using the Freescale SPI controllers in master mode.
MPC83xx platform uses the controller in cpu mode or CPM/QE mode.
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
+ This also enables using the Aeroflex Gaisler GRLIB SPI controller in
+ master mode.
+
+config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select SPI_BITBANG
+ select REGMAP_MMIO
+ depends on SOC_VF610 || COMPILE_TEST
+ help
+ This enables support for the Freescale DSPI controller in master
+ mode. VF610 platform uses the controller.
config SPI_FSL_ESPI
bool "Freescale eSPI controller"
@@ -233,14 +293,14 @@ config SPI_FSL_ESPI
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This is the driver for OpenCores tiny SPI master controller.
config SPI_OCTEON
tristate "Cavium OCTEON SPI controller"
- depends on CPU_CAVIUM_OCTEON
+ depends on CAVIUM_OCTEON_SOC
help
SPI host driver for the hardware found on some Cavium OCTEON
SOCs.
@@ -254,20 +314,29 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
- depends on ARCH_OMAP2PLUS
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
(McSPI) modules.
+config SPI_TI_QSPI
+ tristate "DRA7xxx QSPI controller support"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ QSPI master controller for DRA7xxx used for flash devices.
+ This device supports single, dual and quad read support, while
+ it only supports single write mode.
+
config SPI_OMAP_100K
tristate "OMAP SPI 100K"
- depends on ARCH_OMAP850 || ARCH_OMAP730
+ depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST
help
OMAP SPI 100K master controller for omap7xx boards.
config SPI_ORION
- tristate "Orion SPI master (EXPERIMENTAL)"
- depends on PLAT_ORION && EXPERIMENTAL
+ tristate "Orion SPI master"
+ depends on PLAT_ORION || COMPILE_TEST
help
This enables using the SPI master controller on the Orion chips.
@@ -290,9 +359,20 @@ config SPI_PPC4xx
help
This selects a driver for the PPC4xx SPI Controller.
+config SPI_PXA2XX_PXADMA
+ bool "PXA2xx SSP legacy PXA DMA API support"
+ depends on SPI_PXA2XX && ARCH_PXA
+ help
+ Enable PXA private legacy DMA API support. Note that this is
+ deprecated in favor of generic DMA engine API.
+
+config SPI_PXA2XX_DMA
+ def_bool y
+ depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
+
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
+ depends on (ARCH_PXA || PCI || ACPI)
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
@@ -300,17 +380,30 @@ config SPI_PXA2XX
additional documentation can be found at Documentation/spi/pxa2xx.
config SPI_PXA2XX_PCI
- def_bool SPI_PXA2XX && X86_32 && PCI
+ def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
- depends on SUPERH
+ tristate "Renesas RSPI/QSPI controller"
+ depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
+
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ help
+ Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for serial peripheral interface (SPI) mini-core. SPI in master
+ mode supports up to 50MHz, up to four chip selects, programmable
+ data path from 4 bits to 32 bits and numerous protocol variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
- depends on ARCH_S3C24XX && EXPERIMENTAL
+ depends on ARCH_S3C24XX
select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs
@@ -328,8 +421,8 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
- select S3C64XX_DMA if ARCH_S3C64XX
+ depends on PLAT_SAMSUNG
+ select S3C64XX_PL080 if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
@@ -341,14 +434,14 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
- depends on SUPERH && HAVE_CLK
- select SPI_BITBANG
+ depends on HAVE_CLK
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
- SPI driver for SuperH MSIOF blocks.
+ SPI driver for SuperH and SH Mobile MSIOF blocks.
config SPI_SH
tristate "SuperH SPI controller"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
help
SPI driver for SuperH SPI blocks.
@@ -361,22 +454,29 @@ config SPI_SH_SCI
config SPI_SH_HSPI
tristate "SuperH HSPI controller"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
SPI driver for SuperH HSPI blocks.
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
- depends on ARCH_PRIMA2
+ depends on SIRF_DMA
select SPI_BITBANG
help
SPI driver for CSR SiRFprimaII SoCs
-config SPI_STMP3XXX
- tristate "Freescale STMP37xx/378x SPI/SSP controller"
- depends on ARCH_STMP3XXX
+config SPI_SUN4I
+ tristate "Allwinner A10 SoCs SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
help
- SPI driver for Freescale STMP37xx/378x SoC SSP interface
+ SPI driver for Allwinner sun4i, sun5i and sun7i SoCs
+
+config SPI_SUN6I
+ tristate "Allwinner A31 SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ This enables using the SPI controller on the Allwinner A31 SoCs.
config SPI_MXS
tristate "Freescale MXS SPI controller"
@@ -385,16 +485,34 @@ config SPI_MXS
help
SPI driver for Freescale MXS devices.
-config SPI_TI_SSP
- tristate "TI Sequencer Serial Port - SPI Support"
- depends on MFD_TI_SSP
+config SPI_TEGRA114
+ tristate "NVIDIA Tegra114 SPI Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for the NVIDIA Tegra114 SPI controller interface. This
+ controller differs from the SPI controller on older Tegra SoCs and
+ uses a different register interface.
+
+config SPI_TEGRA20_SFLASH
+ tristate "Nvidia Tegra20 Serial flash Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
- This selects an SPI master implementation using a TI sequencer
- serial port.
+ SPI driver for the Nvidia Tegra20 Serial flash Controller interface.
+ The main use case of this controller is to use SPI flash as the boot
+ device.
+
+config SPI_TEGRA20_SLINK
+ tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
config SPI_TOPCLIFF_PCH
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
@@ -404,7 +522,7 @@ config SPI_TOPCLIFF_PCH
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
- depends on GENERIC_GPIO && CPU_TX49XX
+ depends on GPIOLIB && (CPU_TX49XX || COMPILE_TEST)
help
SPI driver for Toshiba TXx9 MIPS SoCs
@@ -417,7 +535,7 @@ config SPI_XCOMM
config SPI_XILINX
tristate "Xilinx SPI controller common module"
- depends on HAS_IOMEM && EXPERIMENTAL
+ depends on HAS_IOMEM
select SPI_BITBANG
help
This exposes the SPI controller IP from the Xilinx EDK.
@@ -427,9 +545,22 @@ config SPI_XILINX
Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+config SPI_XTENSA_XTFPGA
+ tristate "Xtensa SPI controller for xtfpga"
+ depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ SPI driver for xtfpga SPI master controller.
+
+ This simple SPI master controller is built into xtfpga bitstreams
+ and is used to control the daughterboard audio codec. It always
+ transfers 16-bit words in SPI mode 0, automatically asserting CS at
+ the start of a transfer and deasserting it at the end.
+
+
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
- depends on ARCH_W90X900 && EXPERIMENTAL
+ depends on ARCH_W90X900
select SPI_BITBANG
help
SPI driver for Nuvoton NUC900 series ARM SoCs
@@ -453,7 +584,7 @@ config SPI_DW_MID_DMA
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && HAVE_CLK
+ depends on SPI_DESIGNWARE
#
# There are lots of SPI device types, with sensors and memory
@@ -463,7 +594,6 @@ comment "SPI Protocol Masters"
config SPI_SPIDEV
tristate "User mode SPI device driver support"
- depends on EXPERIMENTAL
help
This supports user mode SPI protocol drivers.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47e4b0..929c9f5eac0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -14,19 +14,27 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
+obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
+obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
@@ -43,11 +51,16 @@ obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
-obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
+spi-pxa2xx-platform-objs := spi-pxa2xx.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
@@ -59,11 +72,14 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
-obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
-obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
+obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
+obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
+obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
-
+obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
diff --git a/drivers/spi/spi-adi-v3.c b/drivers/spi/spi-adi-v3.c
new file mode 100644
index 00000000000..dcb2287c7f8
--- /dev/null
+++ b/drivers/spi/spi-adi-v3.c
@@ -0,0 +1,986 @@
+/*
+ * Analog Devices SPI3 controller driver
+ *
+ * Copyright (c) 2014 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/adi_spi3.h>
+#include <linux/types.h>
+
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+enum adi_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE
+};
+
+struct adi_spi_master;
+
+struct adi_spi_transfer_ops {
+ void (*write) (struct adi_spi_master *);
+ void (*read) (struct adi_spi_master *);
+ void (*duplex) (struct adi_spi_master *);
+};
+
+/* runtime info for spi master */
+struct adi_spi_master {
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct adi_spi_regs __iomem *regs;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct adi_spi_device *cur_chip;
+ unsigned transfer_len;
+
+ /* transfer buffer */
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+
+ /* dma info */
+ unsigned int tx_dma;
+ unsigned int rx_dma;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+ unsigned long dummy_buffer; /* used in unidirectional transfer */
+ unsigned long tx_dma_size;
+ unsigned long rx_dma_size;
+ int tx_num;
+ int rx_num;
+
+ /* store register value for suspend/resume */
+ u32 control;
+ u32 ssel;
+
+ unsigned long sclk;
+ enum adi_spi_state state;
+
+ const struct adi_spi_transfer_ops *ops;
+};
+
+struct adi_spi_device {
+ u32 control;
+ u32 clock;
+ u32 ssel;
+
+ u8 cs;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u32 tx_dummy_val; /* tx value for rx only transfer */
+ bool enable_dma;
+ const struct adi_spi_transfer_ops *ops;
+};
+
+static void adi_spi_enable(struct adi_spi_master *drv_data)
+{
+ u32 ctl;
+
+ ctl = ioread32(&drv_data->regs->control);
+ ctl |= SPI_CTL_EN;
+ iowrite32(ctl, &drv_data->regs->control);
+}
+
+static void adi_spi_disable(struct adi_spi_master *drv_data)
+{
+ u32 ctl;
+
+ ctl = ioread32(&drv_data->regs->control);
+ ctl &= ~SPI_CTL_EN;
+ iowrite32(ctl, &drv_data->regs->control);
+}
+
+/* Calculate the SPI_CLOCK register value based on input HZ */
+static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
+{
+ u32 spi_clock = sclk / speed_hz;
+
+ if (spi_clock)
+ spi_clock--;
+ return spi_clock;
+}
+
+static int adi_spi_flush(struct adi_spi_master *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ /* wait for stop and clear stat */
+ while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
+ cpu_relax();
+
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+
+ return limit;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS)) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg &= ~chip->ssel;
+ iowrite32(reg, &drv_data->regs->ssel);
+ } else {
+ gpio_set_value(chip->cs_gpio, 0);
+ }
+}
+
+static void adi_spi_cs_deactive(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS)) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg |= chip->ssel;
+ iowrite32(reg, &drv_data->regs->ssel);
+ } else {
+ gpio_set_value(chip->cs_gpio, 1);
+ }
+
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
+static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (chip->cs < MAX_CTRL_CS) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg |= chip->ssel >> 8;
+ iowrite32(reg, &drv_data->regs->ssel);
+ }
+}
+
+static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (chip->cs < MAX_CTRL_CS) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg &= ~(chip->ssel >> 8);
+ iowrite32(reg, &drv_data->regs->ssel);
+ }
+}
+
+/* stop controller and re-configure current chip */
+static void adi_spi_restore_state(struct adi_spi_master *drv_data)
+{
+ struct adi_spi_device *chip = drv_data->cur_chip;
+
+ /* Clear status and disable clock */
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+ iowrite32(0x0, &drv_data->regs->rx_control);
+ iowrite32(0x0, &drv_data->regs->tx_control);
+ adi_spi_disable(drv_data);
+
+ /* Load the registers */
+ iowrite32(chip->control, &drv_data->regs->control);
+ iowrite32(chip->clock, &drv_data->regs->clock);
+
+ adi_spi_enable(drv_data);
+ drv_data->tx_num = drv_data->rx_num = 0;
+ /* we always use tx transfer initiate (TTI) mode */
+ iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control);
+ iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control);
+ adi_spi_cs_active(drv_data, chip);
+}
+
+/* discard invalid rx data and empty rfifo */
+static inline void dummy_read(struct adi_spi_master *drv_data)
+{
+ while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE))
+ ioread32(&drv_data->regs->rfifo);
+}
+
+static void adi_spi_u8_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u8_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u8_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = {
+ .write = adi_spi_u8_write,
+ .read = adi_spi_u8_read,
+ .duplex = adi_spi_u8_duplex,
+};
+
+static void adi_spi_u16_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 2;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u16_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static void adi_spi_u16_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 2;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = {
+ .write = adi_spi_u16_write,
+ .read = adi_spi_u16_read,
+ .duplex = adi_spi_u16_duplex,
+};
+
+static void adi_spi_u32_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 4;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u32_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static void adi_spi_u32_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 4;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = {
+ .write = adi_spi_u32_write,
+ .read = adi_spi_u32_read,
+ .duplex = adi_spi_u32_duplex,
+};
+
+
+/* test if there are more transfers to be done */
+static void adi_spi_next_transfer(struct adi_spi_master *drv)
+{
+ struct spi_message *msg = drv->cur_msg;
+ struct spi_transfer *t = drv->cur_transfer;
+
+ /* Move to next transfer */
+ if (t->transfer_list.next != &msg->transfers) {
+ drv->cur_transfer = list_entry(t->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ drv->state = RUNNING_STATE;
+ } else {
+ drv->state = DONE_STATE;
+ drv->cur_transfer = NULL;
+ }
+}
+
+static void adi_spi_giveback(struct adi_spi_master *drv_data)
+{
+ struct adi_spi_device *chip = drv_data->cur_chip;
+
+ adi_spi_cs_deactive(drv_data, chip);
+ spi_finalize_current_message(drv_data->master);
+}
+
+static int adi_spi_setup_transfer(struct adi_spi_master *drv)
+{
+ struct spi_transfer *t = drv->cur_transfer;
+ u32 cr, cr_width;
+
+ if (t->tx_buf) {
+ drv->tx = (void *)t->tx_buf;
+ drv->tx_end = drv->tx + t->len;
+ } else {
+ drv->tx = NULL;
+ }
+
+ if (t->rx_buf) {
+ drv->rx = t->rx_buf;
+ drv->rx_end = drv->rx + t->len;
+ } else {
+ drv->rx = NULL;
+ }
+
+ drv->transfer_len = t->len;
+
+ /* bits per word setup */
+ switch (t->bits_per_word) {
+ case 8:
+ cr_width = SPI_CTL_SIZE08;
+ drv->ops = &adi_spi_transfer_ops_u8;
+ break;
+ case 16:
+ cr_width = SPI_CTL_SIZE16;
+ drv->ops = &adi_spi_transfer_ops_u16;
+ break;
+ case 32:
+ cr_width = SPI_CTL_SIZE32;
+ drv->ops = &adi_spi_transfer_ops_u32;
+ break;
+ default:
+ return -EINVAL;
+ }
+ cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE;
+ cr |= cr_width;
+ iowrite32(cr, &drv->regs->control);
+
+ /* speed setup */
+ iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock);
+ return 0;
+}
+
+static int adi_spi_dma_xfer(struct adi_spi_master *drv_data)
+{
+ struct spi_transfer *t = drv_data->cur_transfer;
+ struct spi_message *msg = drv_data->cur_msg;
+ struct adi_spi_device *chip = drv_data->cur_chip;
+ u32 dma_config;
+ unsigned long word_count, word_size;
+ void *tx_buf, *rx_buf;
+
+ switch (t->bits_per_word) {
+ case 8:
+ dma_config = WDSIZE_8 | PSIZE_8;
+ word_count = drv_data->transfer_len;
+ word_size = 1;
+ break;
+ case 16:
+ dma_config = WDSIZE_16 | PSIZE_16;
+ word_count = drv_data->transfer_len / 2;
+ word_size = 2;
+ break;
+ default:
+ dma_config = WDSIZE_32 | PSIZE_32;
+ word_count = drv_data->transfer_len / 4;
+ word_size = 4;
+ break;
+ }
+
+ if (!drv_data->rx) {
+ tx_buf = drv_data->tx;
+ rx_buf = &drv_data->dummy_buffer;
+ drv_data->tx_dma_size = drv_data->transfer_len;
+ drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer);
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, 0);
+ } else if (!drv_data->tx) {
+ drv_data->dummy_buffer = chip->tx_dummy_val;
+ tx_buf = &drv_data->dummy_buffer;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer);
+ drv_data->rx_dma_size = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, 0);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ } else {
+ tx_buf = drv_data->tx;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = drv_data->rx_dma_size
+ = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ }
+
+ drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)tx_buf,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->tx_dma_addr))
+ return -ENOMEM;
+
+ drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)rx_buf,
+ drv_data->rx_dma_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->rx_dma_addr)) {
+ dma_unmap_single(&msg->spi->dev,
+ drv_data->tx_dma_addr,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ dummy_read(drv_data);
+ set_dma_x_count(drv_data->tx_dma, word_count);
+ set_dma_x_count(drv_data->rx_dma, word_count);
+ set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr);
+ set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr);
+ dma_config |= DMAFLOW_STOP | RESTART | DI_EN;
+ set_dma_config(drv_data->tx_dma, dma_config);
+ set_dma_config(drv_data->rx_dma, dma_config | WNR);
+ enable_dma(drv_data->tx_dma);
+ enable_dma(drv_data->rx_dma);
+
+ iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE,
+ &drv_data->regs->rx_control);
+ iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF,
+ &drv_data->regs->tx_control);
+
+ return 0;
+}
+
+static int adi_spi_pio_xfer(struct adi_spi_master *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ if (!drv_data->rx) {
+ /* write only half duplex */
+ drv_data->ops->write(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ } else if (!drv_data->tx) {
+ /* read only half duplex */
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ return -EIO;
+ } else {
+ /* full duplex mode */
+ drv_data->ops->duplex(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ }
+
+ if (!adi_spi_flush(drv_data))
+ return -EIO;
+ msg->actual_length += drv_data->transfer_len;
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+static void adi_spi_pump_transfers(unsigned long data)
+{
+ struct adi_spi_master *drv_data = (struct adi_spi_master *)data;
+ struct spi_message *msg = NULL;
+ struct spi_transfer *t = NULL;
+ struct adi_spi_device *chip = NULL;
+ int ret;
+
+ /* Get current state information */
+ msg = drv_data->cur_msg;
+ t = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /* Handle for abort */
+ if (drv_data->state == ERROR_STATE) {
+ msg->status = -EIO;
+ adi_spi_giveback(drv_data);
+ return;
+ }
+
+ if (drv_data->state == RUNNING_STATE) {
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ if (t->cs_change)
+ adi_spi_cs_deactive(drv_data, chip);
+ adi_spi_next_transfer(drv_data);
+ t = drv_data->cur_transfer;
+ }
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ msg->status = 0;
+ adi_spi_giveback(drv_data);
+ return;
+ }
+
+ if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) {
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return;
+ }
+
+ ret = adi_spi_setup_transfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ adi_spi_giveback(drv_data);
+ }
+
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+ adi_spi_cs_active(drv_data, chip);
+ drv_data->state = RUNNING_STATE;
+
+ if (chip->enable_dma)
+ ret = adi_spi_dma_xfer(drv_data);
+ else
+ ret = adi_spi_pio_xfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ adi_spi_giveback(drv_data);
+ }
+}
+
+static int adi_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ drv_data->cur_msg = m;
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ adi_spi_restore_state(drv_data);
+
+ drv_data->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+#define MAX_SPI_SSEL 7
+
+static const u16 ssel[][MAX_SPI_SSEL] = {
+ {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
+ P_SPI0_SSEL4, P_SPI0_SSEL5,
+ P_SPI0_SSEL6, P_SPI0_SSEL7},
+
+ {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
+ P_SPI1_SSEL4, P_SPI1_SSEL5,
+ P_SPI1_SSEL6, P_SPI1_SSEL7},
+
+ {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
+ P_SPI2_SSEL4, P_SPI2_SSEL5,
+ P_SPI2_SSEL6, P_SPI2_SSEL7},
+};
+
+static int adi_spi_setup(struct spi_device *spi)
+{
+ struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
+ struct adi_spi_device *chip = spi_get_ctldata(spi);
+ u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
+ int ret = -EINVAL;
+
+ if (!chip) {
+ struct adi_spi3_chip *chip_info = spi->controller_data;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spi->dev, "can not allocate chip data\n");
+ return -ENOMEM;
+ }
+ if (chip_info) {
+ if (chip_info->control & ~ctl_reg) {
+ dev_err(&spi->dev,
+ "do not set bits that the SPI framework manages\n");
+ goto error;
+ }
+ chip->control = chip_info->control;
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->tx_dummy_val = chip_info->tx_dummy_val;
+ chip->enable_dma = chip_info->enable_dma;
+ }
+ chip->cs = spi->chip_select;
+
+ if (chip->cs < MAX_CTRL_CS) {
+ chip->ssel = (1 << chip->cs) << 8;
+ ret = peripheral_request(ssel[spi->master->bus_num]
+ [chip->cs-1], dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "peripheral_request() error\n");
+ goto error;
+ }
+ } else {
+ chip->cs_gpio = chip->cs - MAX_CTRL_CS;
+ ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "gpio_request_one() error\n");
+ goto error;
+ }
+ }
+ spi_set_ctldata(spi, chip);
+ }
+
+ /* force a default base state */
+ chip->control &= ctl_reg;
+
+ if (spi->mode & SPI_CPOL)
+ chip->control |= SPI_CTL_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->control |= SPI_CTL_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->control |= SPI_CTL_LSBF;
+ chip->control |= SPI_CTL_MSTR;
+ /* we choose software to control cs */
+ chip->control &= ~SPI_CTL_ASSEL;
+
+ chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
+
+ adi_spi_cs_enable(drv_data, chip);
+ adi_spi_cs_deactive(drv_data, chip);
+
+ return 0;
+error:
+ if (chip) {
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+ }
+
+ return ret;
+}
+
+static void adi_spi_cleanup(struct spi_device *spi)
+{
+ struct adi_spi_device *chip = spi_get_ctldata(spi);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
+
+ if (!chip)
+ return;
+
+ if (chip->cs < MAX_CTRL_CS) {
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->cs-1]);
+ adi_spi_cs_disable(drv_data, chip);
+ } else {
+ gpio_free(chip->cs_gpio);
+ }
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id)
+{
+ struct adi_spi_master *drv_data = dev_id;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
+ u32 tx_ctl;
+
+ clear_dma_irqstat(drv_data->tx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->tx_num++;
+ } else {
+ dev_err(&drv_data->master->dev,
+ "spi tx dma error: %d\n", dma_stat);
+ if (drv_data->tx)
+ drv_data->state = ERROR_STATE;
+ }
+ tx_ctl = ioread32(&drv_data->regs->tx_control);
+ tx_ctl &= ~SPI_TXCTL_TDR_NF;
+ iowrite32(tx_ctl, &drv_data->regs->tx_control);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id)
+{
+ struct adi_spi_master *drv_data = dev_id;
+ struct spi_message *msg = drv_data->cur_msg;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
+
+ clear_dma_irqstat(drv_data->rx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->rx_num++;
+ /* we may fail on tx dma */
+ if (drv_data->state != ERROR_STATE)
+ msg->actual_length += drv_data->transfer_len;
+ } else {
+ drv_data->state = ERROR_STATE;
+ dev_err(&drv_data->master->dev,
+ "spi rx dma error: %d\n", dma_stat);
+ }
+ iowrite32(0, &drv_data->regs->tx_control);
+ iowrite32(0, &drv_data->regs->rx_control);
+ if (drv_data->rx_num != drv_data->tx_num)
+ dev_dbg(&drv_data->master->dev,
+ "dma interrupt missing: tx=%d,rx=%d\n",
+ drv_data->tx_num, drv_data->rx_num);
+ tasklet_schedule(&drv_data->pump_transfers);
+ return IRQ_HANDLED;
+}
+
+static int adi_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adi_spi3_master *info = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct adi_spi_master *drv_data;
+ struct resource *mem, *res;
+ unsigned int tx_dma, rx_dma;
+ struct clk *sclk;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "platform data missing!\n");
+ return -ENODEV;
+ }
+
+ sclk = devm_clk_get(dev, "spi");
+ if (IS_ERR(sclk)) {
+ dev_err(dev, "can not get spi clock\n");
+ return PTR_ERR(sclk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "can not get tx dma resource\n");
+ return -ENXIO;
+ }
+ tx_dma = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(dev, "can not get rx dma resource\n");
+ return -ENXIO;
+ }
+ rx_dma = res->start;
+
+ /* allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*drv_data));
+ if (!master) {
+ dev_err(dev, "can not alloc spi_master\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ /* the mode bits supported by this driver */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = info->num_chipselect;
+ master->cleanup = adi_spi_cleanup;
+ master->setup = adi_spi_setup;
+ master->transfer_one_message = adi_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->tx_dma = tx_dma;
+ drv_data->rx_dma = rx_dma;
+ drv_data->pin_req = info->pin_req;
+ drv_data->sclk = clk_get_rate(sclk);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv_data->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(drv_data->regs)) {
+ ret = PTR_ERR(drv_data->regs);
+ goto err_put_master;
+ }
+
+ /* request tx and rx dma */
+ ret = request_dma(tx_dma, "SPI_TX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI TX DMA channel\n");
+ goto err_put_master;
+ }
+ set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data);
+
+ ret = request_dma(rx_dma, "SPI_RX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI RX DMA channel\n");
+ goto err_free_tx_dma;
+ }
+ set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data);
+
+ /* request CLK, MOSI and MISO */
+ ret = peripheral_request_list(drv_data->pin_req, "adi-spi3");
+ if (ret < 0) {
+ dev_err(dev, "can not request spi pins\n");
+ goto err_free_rx_dma;
+ }
+
+ iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
+ iowrite32(0x0000FE00, &drv_data->regs->ssel);
+ iowrite32(0x0, &drv_data->regs->delay);
+
+ tasklet_init(&drv_data->pump_transfers,
+ adi_spi_pump_transfers, (unsigned long)drv_data);
+ /* register with the SPI framework */
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "can not register spi master\n");
+ goto err_free_peripheral;
+ }
+
+ return ret;
+
+err_free_peripheral:
+ peripheral_free_list(drv_data->pin_req);
+err_free_rx_dma:
+ free_dma(rx_dma);
+err_free_tx_dma:
+ free_dma(tx_dma);
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int adi_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ adi_spi_disable(drv_data);
+ peripheral_free_list(drv_data->pin_req);
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adi_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ drv_data->control = ioread32(&drv_data->regs->control);
+ drv_data->ssel = ioread32(&drv_data->regs->ssel);
+
+ iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
+ iowrite32(0x0000FE00, &drv_data->regs->ssel);
+ dma_disable_irq(drv_data->rx_dma);
+ dma_disable_irq(drv_data->tx_dma);
+
+ return 0;
+}
+
+static int adi_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+ int ret = 0;
+
+ /* bootrom may modify spi and dma status when resuming in spi boot mode */
+ disable_dma(drv_data->rx_dma);
+
+ dma_enable_irq(drv_data->rx_dma);
+ dma_enable_irq(drv_data->tx_dma);
+ iowrite32(drv_data->control, &drv_data->regs->control);
+ iowrite32(drv_data->ssel, &drv_data->regs->ssel);
+
+ ret = spi_master_resume(master);
+ if (ret) {
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+ }
+
+ return ret;
+}
+#endif
+static const struct dev_pm_ops adi_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume)
+};
+
+MODULE_ALIAS("platform:adi-spi3");
+static struct platform_driver adi_spi_driver = {
+ .driver = {
+ .name = "adi-spi3",
+ .owner = THIS_MODULE,
+ .pm = &adi_spi_pm_ops,
+ },
+ .remove = adi_spi_remove,
+};
+
+module_platform_driver_probe(adi_spi_driver, adi_spi_probe);
+
+MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
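
A quick aside on the clock setup in the new driver above: hz_to_spi_clock() assumes the SPI_CLOCK register holds sclk / speed_hz minus one, i.e. the controller presumably divides by the register value plus one. A minimal stand-alone sketch of that arithmetic, compilable outside the kernel (the 100 MHz sclk and 25 MHz transfer rate are only illustrative assumptions):

/* sketch only: mirrors hz_to_spi_clock() from spi-adi-v3.c above */
#include <stdio.h>

static unsigned int hz_to_spi_clock(unsigned int sclk, unsigned int speed_hz)
{
	unsigned int spi_clock = sclk / speed_hz;

	if (spi_clock)
		spi_clock--;
	return spi_clock;
}

int main(void)
{
	/* a 100 MHz sclk and a 25 MHz transfer rate give a register
	 * value of 3, consistent with a divide-by-(value + 1) scheme */
	printf("%u\n", hz_to_spi_clock(100 * 1000 * 1000, 25 * 1000 * 1000));
	return 0;
}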
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index f1fec2a19d1..5b5709a5c95 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -103,16 +102,6 @@ static void altera_spi_chipsel(struct spi_device *spi, int value)
}
}
-static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
-{
- return 0;
-}
-
-static int altera_spi_setup(struct spi_device *spi)
-{
- return 0;
-}
-
static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
{
if (hw->tx) {
@@ -134,7 +123,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->count = 0;
- hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8;
+ hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
hw->len = t->len / hw->bytes_per_word;
if (hw->irq >= 0) {
@@ -150,12 +139,12 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
} else {
- /* send the first byte */
- writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
-
- while (1) {
+ while (hw->count < hw->len) {
unsigned int rxd;
+ writel(hw_txbyte(hw, hw->count),
+ hw->base + ALTERA_SPI_TXDATA);
+
while (!(readl(hw->base + ALTERA_SPI_STATUS) &
ALTERA_SPI_STATUS_RRDY_MSK))
cpu_relax();
@@ -174,14 +163,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
hw->count++;
-
- if (hw->count < hw->len)
- writel(hw_txbyte(hw, hw->count),
- hw->base + ALTERA_SPI_TXDATA);
- else
- break;
}
-
}
return hw->count * hw->bytes_per_word;
@@ -215,9 +197,8 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit altera_spi_probe(struct platform_device *pdev)
+static int altera_spi_probe(struct platform_device *pdev)
{
- struct altera_spi_platform_data *platp = pdev->dev.platform_data;
struct altera_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -231,30 +212,24 @@ static int __devinit altera_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 16;
master->mode_bits = SPI_CS_HIGH;
- master->setup = altera_spi_setup;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->dev.of_node = pdev->dev.of_node;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
- if (!hw->bitbang.master)
- return err;
- hw->bitbang.setup_transfer = altera_spi_setupxfer;
+ hw->bitbang.master = master;
hw->bitbang.chipselect = altera_spi_chipsel;
hw->bitbang.txrx_bufs = altera_spi_txrx;
/* find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!hw->base)
- goto exit_busy;
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
@@ -270,9 +245,6 @@ static int __devinit altera_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- /* find platform data */
- if (!platp)
- hw->bitbang.master->dev.of_node = pdev->dev.of_node;
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -281,22 +253,17 @@ static int __devinit altera_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
return 0;
-
-exit_busy:
- err = -EBUSY;
exit:
- platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return err;
}
-static int __devexit altera_spi_remove(struct platform_device *dev)
+static int altera_spi_remove(struct platform_device *dev)
{
struct altera_spi *hw = platform_get_drvdata(dev);
struct spi_master *master = hw->bitbang.master;
spi_bitbang_stop(&hw->bitbang);
- platform_set_drvdata(dev, NULL);
spi_master_put(master);
return 0;
}
@@ -304,6 +271,7 @@ static int __devexit altera_spi_remove(struct platform_device *dev)
#ifdef CONFIG_OF
static const struct of_device_id altera_spi_match[] = {
{ .compatible = "ALTR,spi-1.0", },
+ { .compatible = "altr,spi-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_spi_match);
@@ -311,7 +279,7 @@ MODULE_DEVICE_TABLE(of, altera_spi_match);
static struct platform_driver altera_spi_driver = {
.probe = altera_spi_probe,
- .remove = __devexit_p(altera_spi_remove),
+ .remove = altera_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 249077e5cc4..058db0fe8dc 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -14,27 +14,32 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79_spi_platform.h>
#define DRV_NAME "ath79-spi"
+#define ATH79_SPI_RRW_DELAY_FACTOR 12000
+#define MHZ (1000 * 1000)
+
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
u32 reg_ctrl;
void __iomem *base;
+ struct clk *clk;
+ unsigned rrw_delay;
};
static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
@@ -52,6 +57,12 @@ static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
return spi_master_get_devdata(spi->master);
}
+static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned nsecs)
+{
+ if (nsecs > sp->rrw_delay)
+ ndelay(nsecs - sp->rrw_delay);
+}
+
static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
@@ -83,15 +94,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
}
-static int ath79_spi_setup_cs(struct spi_device *spi)
+static void ath79_spi_enable(struct ath79_spi *sp)
{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
- struct ath79_spi_controller_data *cdata;
-
- cdata = spi->controller_data;
- if (spi->chip_select && !cdata)
- return -EINVAL;
-
/* enable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
@@ -101,53 +105,54 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+}
- if (spi->chip_select) {
- int status = 0;
+static void ath79_spi_disable(struct ath79_spi *sp)
+{
+ /* restore CTRL register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
- status = gpio_request(cdata->gpio, dev_name(&spi->dev));
- if (status)
- return status;
+static int ath79_spi_setup_cs(struct spi_device *spi)
+{
+ struct ath79_spi_controller_data *cdata;
+ int status;
- status = gpio_direction_output(cdata->gpio,
- spi->mode & SPI_CS_HIGH);
- if (status) {
- gpio_free(cdata->gpio);
- return status;
- }
- } else {
+ cdata = spi->controller_data;
+ if (spi->chip_select && !cdata)
+ return -EINVAL;
+
+ status = 0;
+ if (spi->chip_select) {
+ unsigned long flags;
+
+ flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ flags |= GPIOF_INIT_LOW;
else
- sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ flags |= GPIOF_INIT_HIGH;
+
+ status = gpio_request_one(cdata->gpio, flags,
+ dev_name(&spi->dev));
}
- return 0;
+ return status;
}
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
-
if (spi->chip_select) {
struct ath79_spi_controller_data *cdata = spi->controller_data;
gpio_free(cdata->gpio);
}
-
- /* restore CTRL register */
- ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
- /* disable GPIO mode */
- ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
static int ath79_spi_setup(struct spi_device *spi)
{
int status = 0;
- if (spi->bits_per_word > 32)
- return -EINVAL;
-
if (!spi->controller_state) {
status = ath79_spi_setup_cs(spi);
if (status)
@@ -184,7 +189,11 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
/* setup MSB (to slave) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+ ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+ ath79_spi_delay(sp, nsecs);
+ if (bits == 1)
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
word <<= 1;
}
@@ -192,12 +201,13 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
-static __devinit int ath79_spi_probe(struct platform_device *pdev)
+static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
struct ath79_spi_platform_data *pdata;
struct resource *r;
+ unsigned long rate;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*sp));
@@ -209,8 +219,9 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
sp = spi_master_get_devdata(master);
platform_set_drvdata(pdev, sp);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->setup = ath79_spi_setup;
master->cleanup = ath79_spi_cleanup;
if (pdata) {
@@ -218,7 +229,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
master->num_chipselect = pdata->num_chipselect;
}
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
@@ -230,42 +241,70 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- sp->base = ioremap(r->start, resource_size(r));
+ sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!sp->base) {
ret = -ENXIO;
goto err_put_master;
}
+ sp->clk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sp->clk)) {
+ ret = PTR_ERR(sp->clk);
+ goto err_put_master;
+ }
+
+ ret = clk_enable(sp->clk);
+ if (ret)
+ goto err_put_master;
+
+ rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
+ dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
+ sp->rrw_delay);
+
+ ath79_spi_enable(sp);
ret = spi_bitbang_start(&sp->bitbang);
if (ret)
- goto err_unmap;
+ goto err_disable;
return 0;
-err_unmap:
- iounmap(sp->base);
+err_disable:
+ ath79_spi_disable(sp);
+err_clk_disable:
+ clk_disable(sp->clk);
err_put_master:
- platform_set_drvdata(pdev, NULL);
spi_master_put(sp->bitbang.master);
return ret;
}
-static __devexit int ath79_spi_remove(struct platform_device *pdev)
+static int ath79_spi_remove(struct platform_device *pdev)
{
struct ath79_spi *sp = platform_get_drvdata(pdev);
spi_bitbang_stop(&sp->bitbang);
- iounmap(sp->base);
- platform_set_drvdata(pdev, NULL);
+ ath79_spi_disable(sp);
+ clk_disable(sp->clk);
spi_master_put(sp->bitbang.master);
return 0;
}
+static void ath79_spi_shutdown(struct platform_device *pdev)
+{
+ ath79_spi_remove(pdev);
+}
+
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
- .remove = __devexit_p(ath79_spi_remove),
+ .remove = ath79_spi_remove,
+ .shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 16d6a839c7f..92a6f0d9323 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -9,21 +9,23 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/of.h>
-#include <asm/io.h>
-#include <mach/board.h>
-#include <asm/gpio.h>
-#include <mach/cpu.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -38,6 +40,7 @@
#define SPI_CSR1 0x0034
#define SPI_CSR2 0x0038
#define SPI_CSR3 0x003c
+#define SPI_VERSION 0x00fc
#define SPI_RPR 0x0100
#define SPI_RCR 0x0104
#define SPI_TPR 0x0108
@@ -70,6 +73,8 @@
#define SPI_FDIV_SIZE 1
#define SPI_MODFDIS_OFFSET 4
#define SPI_MODFDIS_SIZE 1
+#define SPI_WDRBT_OFFSET 5
+#define SPI_WDRBT_SIZE 1
#define SPI_LLB_OFFSET 7
#define SPI_LLB_SIZE 1
#define SPI_PCS_OFFSET 16
@@ -165,20 +170,43 @@
/* Bit manipulation macros */
#define SPI_BIT(name) \
(1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
+#define SPI_BF(name, value) \
(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
+#define SPI_BFEXT(name, value) \
(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
- ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
- | SPI_BF(name,value))
+#define SPI_BFINS(name, value, old) \
+ (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name, value))
/* Register access macros */
-#define spi_readl(port,reg) \
+#define spi_readl(port, reg) \
__raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
+#define spi_writel(port, reg, value) \
__raw_writel((value), (port)->regs + SPI_##reg)
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 16
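/*
 * Illustration of the threshold above (transfer sizes assumed, not from the
 * patch): an 8-byte transfer is pushed out over PIO, while a 64-byte one
 * takes the dmaengine path, provided DMA channels were obtained at probe
 * time; see atmel_spi_use_dma() below.
 */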
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct atmel_spi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sgrx;
+ struct scatterlist sgtx;
+ struct dma_async_tx_descriptor *data_desc_rx;
+ struct dma_async_tx_descriptor *data_desc_tx;
+
+ struct at_dma_slave dma_slave;
+};
+
+struct atmel_spi_caps {
+ bool is_spi2;
+ bool has_wdrbt;
+ bool has_dma_support;
+};
/*
* The core SPI transfer engine just talks to a register bank to set up
@@ -187,22 +215,33 @@
*/
struct atmel_spi {
spinlock_t lock;
+ unsigned long flags;
+ phys_addr_t phybase;
void __iomem *regs;
int irq;
struct clk *clk;
struct platform_device *pdev;
- struct spi_device *stay;
- u8 stopping;
- struct list_head queue;
struct spi_transfer *current_transfer;
- unsigned long current_remaining_bytes;
- struct spi_transfer *next_transfer;
- unsigned long next_remaining_bytes;
+ int current_remaining_bytes;
+ int done_status;
+
+ struct completion xfer_completion;
+ /* scratch buffer */
void *buffer;
dma_addr_t buffer_dma;
+
+ struct atmel_spi_caps caps;
+
+ bool use_dma;
+ bool use_pdc;
+ /* dmaengine data */
+ struct atmel_spi_dma dma;
+
+ bool keep_cs;
+ bool cs_active;
};
/* Controller-specific per-slave state */
@@ -221,14 +260,10 @@ struct atmel_spi_device {
* - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
* - SPI_CSRx.CSAAT
* - SPI_CSRx.SBCR allows faster clocking
- *
- * We can determine the controller version by reading the VERSION
- * register, but I haven't checked that it exists on all chips, and
- * this is cheaper anyway.
*/
-static bool atmel_spi_is_v2(void)
+static bool atmel_spi_is_v2(struct atmel_spi *as)
{
- return !cpu_is_at91rm9200();
+ return as->caps.is_spi2;
}
/*
@@ -249,11 +284,6 @@ static bool atmel_spi_is_v2(void)
* Master on Chip Select 0.") No workaround exists for that ... so for
* nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
* and (c) will trigger that first erratum in some cases.
- *
- * TODO: Test if the atmel_spi_is_v2() branch below works on
- * AT91RM9200 if we use some other register than CSR0. However, don't
- * do this unconditionally since AP7000 has an errata where the BITS
- * field in CSR0 overrides all other CSRs.
*/
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
@@ -262,15 +292,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
- if (atmel_spi_is_v2()) {
- /*
- * Always use CSR0. This ensures that the clock
- * switches to the correct idle polarity before we
- * toggle the CS.
+ if (atmel_spi_is_v2(as)) {
+ spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
+		/* For lower SPI controller versions there is an issue where a PDC
+		 * transfer on CS1,2,3 needs SPI_CSR0.BITS configured the same as
+		 * SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
- spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS)
- | SPI_BIT(MSTR));
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(WDRBT)
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ }
+
mr = spi_readl(as, MR);
gpio_set_value(asd->npcs_pin, active);
} else {
@@ -317,19 +357,296 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
asd->npcs_pin, active ? " (low)" : "",
mr);
- if (atmel_spi_is_v2() || spi->chip_select != 0)
+ if (atmel_spi_is_v2(as) || spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, !active);
}
-static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
- struct spi_transfer *xfer)
+static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
+{
+ spin_lock_irqsave(&as->lock, as->flags);
+}
+
+static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
+{
+ spin_unlock_irqrestore(&as->lock, as->flags);
+}
+
+static inline bool atmel_spi_use_dma(struct atmel_spi *as,
+ struct spi_transfer *xfer)
+{
+ return as->use_dma && xfer->len >= DMA_MIN_BYTES;
+}
+
+static int atmel_spi_dma_slave_config(struct atmel_spi *as,
+ struct dma_slave_config *slave_config,
+ u8 bits_per_word)
+{
+ int err = 0;
+
+ if (bits_per_word > 8) {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
+ slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
+ slave_config->src_maxburst = 1;
+ slave_config->dst_maxburst = 1;
+ slave_config->device_fc = false;
+
+ slave_config->direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ }
+
+ slave_config->direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static bool filter(struct dma_chan *chan, void *pdata)
+{
+ struct atmel_spi_dma *sl_pdata = pdata;
+ struct at_dma_slave *sl;
+
+ if (!sl_pdata)
+ return false;
+
+ sl = &sl_pdata->dma_slave;
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_spi_configure_dma(struct atmel_spi *as)
+{
+ struct dma_slave_config slave_config;
+ struct device *dev = &as->pdev->dev;
+ int err;
+
+ dma_cap_mask_t mask;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
+ &as->dma,
+ dev, "tx");
+ if (!as->dma.chan_tx) {
+ dev_err(dev,
+ "DMA TX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto error;
+ }
+
+ as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
+ &as->dma,
+ dev, "rx");
+
+ if (!as->dma.chan_rx) {
+ dev_err(dev,
+ "DMA RX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto error;
+ }
+
+ err = atmel_spi_dma_slave_config(as, &slave_config, 8);
+ if (err)
+ goto error;
+
+ dev_info(&as->pdev->dev,
+ "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(as->dma.chan_tx),
+ dma_chan_name(as->dma.chan_rx));
+ return 0;
+error:
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dma_release_channel(as->dma.chan_tx);
+ return err;
+}
+
+static void atmel_spi_stop_dma(struct atmel_spi *as)
{
- return msg->transfers.prev == &xfer->transfer_list;
+ if (as->dma.chan_rx)
+ as->dma.chan_rx->device->device_control(as->dma.chan_rx,
+ DMA_TERMINATE_ALL, 0);
+ if (as->dma.chan_tx)
+ as->dma.chan_tx->device->device_control(as->dma.chan_tx,
+ DMA_TERMINATE_ALL, 0);
}
-static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
+static void atmel_spi_release_dma(struct atmel_spi *as)
{
- return xfer->delay_usecs == 0 && !xfer->cs_change;
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dma_release_channel(as->dma.chan_tx);
+}
+
+/* This function is called by the DMA driver from tasklet context */
+static void dma_callback(void *data)
+{
+ struct spi_master *master = data;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ complete(&as->xfer_completion);
+}
+
+/*
+ * Next transfer using PIO.
+ */
+static void atmel_spi_next_xfer_pio(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+
+ /* Make sure data is not remaining in RDR */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ if (xfer->tx_buf) {
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+ } else {
+ spi_writel(as, TDR, 0);
+ }
+
+ dev_dbg(master->dev.parent,
+ " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
+}
+
+/*
+ * Submit next transfer for DMA.
+ */
+static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+ struct spi_transfer *xfer,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct dma_chan *rxchan = as->dma.chan_rx;
+ struct dma_chan *txchan = as->dma.chan_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ struct dma_slave_config slave_config;
+ dma_cookie_t cookie;
+ u32 len = *plen;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /* release lock for DMA operations */
+ atmel_spi_unlock(as);
+
+ /* prepare the RX dma transfer */
+ sg_init_table(&as->dma.sgrx, 1);
+ if (xfer->rx_buf) {
+ as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgrx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+
+ /* prepare the TX dma transfer */
+ sg_init_table(&as->dma.sgtx, 1);
+ if (xfer->tx_buf) {
+ as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgtx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ }
+
+ sg_dma_len(&as->dma.sgtx) = len;
+ sg_dma_len(&as->dma.sgrx) = len;
+
+ *plen = len;
+
+ if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+ goto err_exit;
+
+ /* Send both scatterlists */
+ rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+ &as->dma.sgrx,
+ 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
+ NULL);
+ if (!rxdesc)
+ goto err_dma;
+
+ txdesc = txchan->device->device_prep_slave_sg(txchan,
+ &as->dma.sgtx,
+ 1,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
+ NULL);
+ if (!txdesc)
+ goto err_dma;
+
+ dev_dbg(master->dev.parent,
+ " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
+ xfer->rx_buf, (unsigned long long)xfer->rx_dma);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(OVRES));
+
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = master;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ /* take back lock */
+ atmel_spi_lock(as);
+ return 0;
+
+err_dma:
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ atmel_spi_stop_dma(as);
+err_exit:
+ atmel_spi_lock(as);
+ return -ENOMEM;
}
static void atmel_spi_next_xfer_data(struct spi_master *master,
@@ -349,6 +666,7 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
if (len > BUFFER_SIZE)
len = BUFFER_SIZE;
}
+
if (xfer->tx_buf)
*tx_dma = xfer->tx_dma + xfer->len - *plen;
else {
@@ -363,73 +681,90 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
*plen = len;
}
-/*
- * Submit next transfer for DMA.
- * lock is held, spi irq is blocked
- */
-static void atmel_spi_next_xfer(struct spi_master *master,
- struct spi_message *msg)
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- u32 len, remaining;
- u32 ieval;
- dma_addr_t tx_dma, rx_dma;
+ u32 scbr, csr;
+ unsigned long bus_hz;
- if (!as->current_transfer)
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- else if (!as->next_transfer)
- xfer = list_entry(as->current_transfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- else
- xfer = NULL;
+ /* v1 chips start out at half the peripheral bus speed. */
+ bus_hz = clk_get_rate(as->clk);
+ if (!atmel_spi_is_v2(as))
+ bus_hz /= 2;
- if (xfer) {
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ /*
+ * Calculate the lowest divider that satisfies the
+ * constraint, assuming div32/fdiv/mbz == 0.
+ */
+ if (xfer->speed_hz)
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+ else
+ /*
+		 * This can happen if max_speed is zero.
+		 * In that case, fall back to the lowest possible speed.
+ */
+ scbr = 0xff;
- len = xfer->len;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- remaining = xfer->len - len;
+ /*
+ * If the resulting divider doesn't fit into the
+ * register bitfield, we can't satisfy the constraint.
+ */
+ if (scbr >= (1 << SPI_SCBR_SIZE)) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz/255);
+ return -EINVAL;
+ }
+ if (scbr == 0) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz);
+ return -EINVAL;
+ }
+ csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = SPI_BFINS(SCBR, scbr, csr);
+ spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
- spi_writel(as, RPR, rx_dma);
- spi_writel(as, TPR, tx_dma);
+ return 0;
+}
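/*
 * Worked example of the divider math above (clock and speed values assumed
 * for illustration, not taken from this patch): with bus_hz = 133 MHz and
 * xfer->speed_hz = 1 MHz, scbr = DIV_ROUND_UP(133000000, 1000000) = 133,
 * which fits the 8-bit SCBR field and gives an SCK of exactly 1 MHz.
 * A request slower than bus_hz / 255 would need scbr >= 256, which does not
 * fit and is rejected with -EINVAL.
 */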
- if (msg->spi->bits_per_word > 8)
- len >>= 1;
- spi_writel(as, RCR, len);
- spi_writel(as, TCR, len);
+/*
+ * Submit next transfer for PDC.
+ * lock is held, spi irq is blocked
+ */
+static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len;
+ dma_addr_t tx_dma, rx_dma;
- dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
- } else {
- xfer = as->next_transfer;
- remaining = as->next_remaining_bytes;
- }
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- as->current_transfer = xfer;
- as->current_remaining_bytes = remaining;
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
- if (remaining > 0)
- len = remaining;
- else if (!atmel_spi_xfer_is_last(msg, xfer)
- && atmel_spi_xfer_can_be_chained(xfer)) {
- xfer = list_entry(xfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- len = xfer->len;
- } else
- xfer = NULL;
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
- as->next_transfer = xfer;
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
- if (xfer) {
- u32 total;
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
- total = len;
+ if (as->current_remaining_bytes) {
+ len = as->current_remaining_bytes;
atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- as->next_remaining_bytes = total - len;
+ as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
@@ -440,14 +775,10 @@ static void atmel_spi_next_xfer(struct spi_master *master,
spi_writel(as, TNCR, len);
dev_dbg(&msg->spi->dev,
- " next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
- ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
- } else {
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
+ " next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
}
/* REVISIT: We're waiting for ENDRX before we start the next
@@ -460,37 +791,10 @@ static void atmel_spi_next_xfer(struct spi_master *master,
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, IER, ieval);
+ spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
-static void atmel_spi_next_message(struct spi_master *master)
-{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_device *spi;
-
- BUG_ON(as->current_transfer);
-
- msg = list_entry(as->queue.next, struct spi_message, queue);
- spi = msg->spi;
-
- dev_dbg(master->dev.parent, "start message %p for %s\n",
- msg, dev_name(&spi->dev));
-
- /* select chip if it's not still active */
- if (as->stay) {
- if (as->stay != spi) {
- cs_deactivate(as, as->stay);
- cs_activate(as, spi);
- }
- as->stay = NULL;
- } else
- cs_activate(as, spi);
-
- atmel_spi_next_xfer(master, msg);
-}
-
/*
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
@@ -541,62 +845,66 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
xfer->len, DMA_FROM_DEVICE);
}
-static void
-atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int status, int stay)
+static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
- if (!stay || status < 0)
- cs_deactivate(as, msg->spi);
- else
- as->stay = msg->spi;
-
- list_del(&msg->queue);
- msg->status = status;
-
- dev_dbg(master->dev.parent,
- "xfer complete: %u bytes transferred\n",
- msg->actual_length);
-
- spin_unlock(&as->lock);
- msg->complete(msg->context);
- spin_lock(&as->lock);
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+}
- as->current_transfer = NULL;
- as->next_transfer = NULL;
+/* Called from IRQ
+ *
+ * Must update "current_remaining_bytes" to keep track of data
+ * to transfer.
+ */
+static void
+atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u8 *rxp;
+ u16 *rxp16;
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
- /* continue if needed */
- if (list_empty(&as->queue) || as->stopping)
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- else
- atmel_spi_next_message(master);
+ if (xfer->rx_buf) {
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
+ } else {
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
+ }
+ } else {
+ spi_readl(as, RDR);
+ }
+ if (xfer->bits_per_word > 8) {
+ if (as->current_remaining_bytes > 2)
+ as->current_remaining_bytes -= 2;
+ else
+ as->current_remaining_bytes = 0;
+ } else {
+ as->current_remaining_bytes--;
+ }
}
+/* Interrupt
+ *
+ * No need for locking in this Interrupt handler: done_status is the
+ * only information modified.
+ */
static irqreturn_t
-atmel_spi_interrupt(int irq, void *dev_id)
+atmel_spi_pio_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
u32 status, pending, imr;
+ struct spi_transfer *xfer;
int ret = IRQ_NONE;
- spin_lock(&as->lock);
-
- xfer = as->current_transfer;
- msg = list_entry(as->queue.next, struct spi_message, queue);
-
imr = spi_readl(as, IMR);
status = spi_readl(as, SR);
pending = status & imr;
if (pending & SPI_BIT(OVRES)) {
- int timeout;
-
ret = IRQ_HANDLED;
-
- spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
- | SPI_BIT(OVRES)));
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ dev_warn(master->dev.parent, "overrun\n");
/*
* When we get an overrun, we disregard the current
@@ -606,85 +914,72 @@ atmel_spi_interrupt(int irq, void *dev_id)
*
 * We will also not process any remaining transfers in
* the message.
- *
- * First, stop the transfer and unmap the DMA buffers.
*/
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
+ as->done_status = -EIO;
+ smp_wmb();
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
- dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
- spi_readl(as, TCR), spi_readl(as, RCR));
+ complete(&as->xfer_completion);
- /*
- * Clean up DMA registers and make sure the data
- * registers are empty.
- */
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- spi_writel(as, RCR, 0);
- spi_writel(as, TCR, 0);
- for (timeout = 1000; timeout; timeout--)
- if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
- break;
- if (!timeout)
- dev_warn(master->dev.parent,
- "timeout waiting for TXEMPTY");
- while (spi_readl(as, SR) & SPI_BIT(RDRF))
- spi_readl(as, RDR);
+ } else if (pending & SPI_BIT(RDRF)) {
+ atmel_spi_lock(as);
+
+ if (as->current_remaining_bytes) {
+ ret = IRQ_HANDLED;
+ xfer = as->current_transfer;
+ atmel_spi_pump_pio_data(as, xfer);
+ if (!as->current_remaining_bytes)
+ spi_writel(as, IDR, pending);
+
+ complete(&as->xfer_completion);
+ }
+
+ atmel_spi_unlock(as);
+ } else {
+ WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, pending);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+atmel_spi_pdc_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+
+ ret = IRQ_HANDLED;
+
+ spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
+ | SPI_BIT(OVRES)));
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- atmel_spi_msg_done(master, as, msg, -EIO, 0);
+ as->done_status = -EIO;
+
+ complete(&as->xfer_completion);
+
} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, pending);
- if (as->current_remaining_bytes == 0) {
- msg->actual_length += xfer->len;
-
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (atmel_spi_xfer_is_last(msg, xfer)) {
- /* report completed message */
- atmel_spi_msg_done(master, as, msg, 0,
- xfer->cs_change);
- } else {
- if (xfer->cs_change) {
- cs_deactivate(as, msg->spi);
- udelay(1);
- cs_activate(as, msg->spi);
- }
-
- /*
- * Not done yet. Submit the next transfer.
- *
- * FIXME handle protocol options for xfer
- */
- atmel_spi_next_xfer(master, msg);
- }
- } else {
- /*
- * Keep going, we still have data to send in
- * the current transfer.
- */
- atmel_spi_next_xfer(master, msg);
- }
+ complete(&as->xfer_completion);
}
- spin_unlock(&as->lock);
-
return ret;
}
@@ -692,66 +987,22 @@ static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
- u32 scbr, csr;
+ u32 csr;
unsigned int bits = spi->bits_per_word;
- unsigned long bus_hz;
unsigned int npcs_pin;
int ret;
as = spi_master_get_devdata(spi->master);
- if (as->stopping)
- return -ESHUTDOWN;
-
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_dbg(&spi->dev,
- "setup: invalid chipselect %u (%u defined)\n",
- spi->chip_select, spi->master->num_chipselect);
- return -EINVAL;
- }
-
- if (bits < 8 || bits > 16) {
- dev_dbg(&spi->dev,
- "setup: invalid bits_per_word %u (8 to 16)\n",
- bits);
- return -EINVAL;
- }
-
/* see notes above re chipselect */
- if (!atmel_spi_is_v2()
+ if (!atmel_spi_is_v2(as)
&& spi->chip_select == 0
&& (spi->mode & SPI_CS_HIGH)) {
dev_dbg(&spi->dev, "setup: can't be active-high\n");
return -EINVAL;
}
- /* v1 chips start out at half the peripheral bus speed. */
- bus_hz = clk_get_rate(as->clk);
- if (!atmel_spi_is_v2())
- bus_hz /= 2;
-
- if (spi->max_speed_hz) {
- /*
- * Calculate the lowest divider that satisfies the
- * constraint, assuming div32/fdiv/mbz == 0.
- */
- scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
-
- /*
- * If the resulting divider doesn't fit into the
- * register bitfield, we can't satisfy the constraint.
- */
- if (scbr >= (1 << SPI_SCBR_SIZE)) {
- dev_dbg(&spi->dev,
- "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
- spi->max_speed_hz, scbr, bus_hz/255);
- return -EINVAL;
- }
- } else
- /* speed zero means "as slow as possible" */
- scbr = 0xff;
-
- csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
+ csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
@@ -768,6 +1019,10 @@ static int atmel_spi_setup(struct spi_device *spi)
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned int)spi->controller_data;
+
+ if (gpio_is_valid(spi->cs_gpio))
+ npcs_pin = spi->cs_gpio;
+
asd = spi->controller_state;
if (!asd) {
asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
@@ -783,131 +1038,251 @@ static int atmel_spi_setup(struct spi_device *spi)
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
- } else {
- unsigned long flags;
-
- spin_lock_irqsave(&as->lock, flags);
- if (as->stay == spi)
- as->stay = NULL;
- cs_deactivate(as, spi);
- spin_unlock_irqrestore(&as->lock, flags);
}
asd->csr = csr;
dev_dbg(&spi->dev,
- "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
- bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
+ "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+ bits, spi->mode, spi->chip_select, csr);
- if (!atmel_spi_is_v2())
+ if (!atmel_spi_is_v2(as))
spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
return 0;
}
-static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int atmel_spi_one_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as;
- struct spi_transfer *xfer;
- unsigned long flags;
- struct device *controller = spi->master->dev.parent;
+ struct spi_device *spi = msg->spi;
u8 bits;
+ u32 len;
struct atmel_spi_device *asd;
+ int timeout;
+ int ret;
- as = spi_master_get_devdata(spi->master);
-
- dev_dbg(controller, "new message %p submitted for %s\n",
- msg, dev_name(&spi->dev));
+ as = spi_master_get_devdata(master);
- if (unlikely(list_empty(&msg->transfers)))
+ if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
return -EINVAL;
+ }
- if (as->stopping)
- return -ESHUTDOWN;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
- dev_dbg(&spi->dev, "missing rx or tx buf\n");
- return -EINVAL;
+ if (xfer->bits_per_word) {
+ asd = spi->controller_state;
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
+ return -ENOPROTOOPT;
}
+ }
+
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting.
+ */
+ if ((!msg->is_dma_mapped)
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
+
+ atmel_spi_set_xfer_speed(as, msg->spi, xfer);
- if (xfer->bits_per_word) {
- asd = spi->controller_state;
- bits = (asd->csr >> 4) & 0xf;
- if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev, "you can't yet change "
- "bits_per_word in transfers\n");
- return -ENOPROTOOPT;
+ as->done_status = 0;
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = xfer->len;
+ while (as->current_remaining_bytes) {
+ reinit_completion(&as->xfer_completion);
+
+ if (as->use_pdc) {
+ atmel_spi_pdc_next_xfer(master, msg, xfer);
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ len = as->current_remaining_bytes;
+ ret = atmel_spi_next_xfer_dma_submit(master,
+ xfer, &len);
+ if (ret) {
+ dev_err(&spi->dev,
+ "unable to use DMA, fallback to PIO\n");
+ atmel_spi_next_xfer_pio(master, xfer);
+ } else {
+ as->current_remaining_bytes -= len;
+ if (as->current_remaining_bytes < 0)
+ as->current_remaining_bytes = 0;
}
+ } else {
+ atmel_spi_next_xfer_pio(master, xfer);
}
- /* FIXME implement these protocol options!! */
- if (xfer->speed_hz) {
- dev_dbg(&spi->dev, "no protocol options yet\n");
- return -ENOPROTOOPT;
+ /* interrupts are disabled, so free the lock for schedule */
+ atmel_spi_unlock(as);
+ ret = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ atmel_spi_lock(as);
+ if (WARN_ON(ret == 0)) {
+ dev_err(&spi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ as->done_status = -EIO;
+ } else {
+ ret = 0;
}
- /*
- * DMA map early, for performance (empties dcache ASAP) and
- * better fault reporting. This is a DMA-only driver.
- *
- * NOTE that if dma_unmap_single() ever starts to do work on
- * platforms supported by this driver, we would need to clean
- * up mappings for previously-mapped transfers.
- */
- if (!msg->is_dma_mapped) {
- if (atmel_spi_dma_map_xfer(as, xfer) < 0)
- return -ENOMEM;
+ if (as->done_status)
+ break;
+ }
+
+ if (as->done_status) {
+ if (as->use_pdc) {
+ dev_warn(master->dev.parent,
+ "overrun (%u/%u remaining)\n",
+ spi_readl(as, TCR), spi_readl(as, RCR));
+
+ /*
+ * Clean up DMA registers and make sure the data
+ * registers are empty.
+ */
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ spi_writel(as, RCR, 0);
+ spi_writel(as, TCR, 0);
+ for (timeout = 1000; timeout; timeout--)
+ if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+ break;
+ if (!timeout)
+ dev_warn(master->dev.parent,
+ "timeout waiting for TXEMPTY");
+ while (spi_readl(as, SR) & SPI_BIT(RDRF))
+ spi_readl(as, RDR);
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ atmel_spi_stop_dma(as);
}
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ return 0;
+
+ } else {
+ /* only update length if no error */
+ msg->actual_length += xfer->len;
+ }
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ as->keep_cs = true;
+ } else {
+ as->cs_active = !as->cs_active;
+ if (as->cs_active)
+ cs_activate(as, msg->spi);
+ else
+ cs_deactivate(as, msg->spi);
+ }
+ }
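		/*
		 * Note on the cs_change handling above: a cs_change on the
		 * last transfer sets keep_cs, so the final cs_deactivate() in
		 * atmel_spi_transfer_one_message() is skipped; mid-message it
		 * simply toggles the chip select between transfers.
		 */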
+
+ return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct atmel_spi *as;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret = 0;
+
+ as = spi_master_get_devdata(master);
+
+ dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+ msg, dev_name(&spi->dev));
+
+ atmel_spi_lock(as);
+ cs_activate(as, spi);
+
+ as->cs_active = true;
+ as->keep_cs = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = atmel_spi_one_transfer(master, msg, xfer);
+ if (ret)
+ goto msg_done;
}
-#ifdef VERBOSE
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_dbg(controller,
- " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ dev_dbg(&spi->dev,
+ " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
xfer, xfer->len,
- xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ xfer->tx_buf, &xfer->tx_dma,
+ xfer->rx_buf, &xfer->rx_dma);
}
-#endif
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
+msg_done:
+ if (!as->keep_cs)
+ cs_deactivate(as, msg->spi);
- spin_lock_irqsave(&as->lock, flags);
- list_add_tail(&msg->queue, &as->queue);
- if (!as->current_transfer)
- atmel_spi_next_message(spi->master);
- spin_unlock_irqrestore(&as->lock, flags);
+ atmel_spi_unlock(as);
- return 0;
+ msg->status = as->done_status;
+ spi_finalize_current_message(spi->master);
+
+ return ret;
}
static void atmel_spi_cleanup(struct spi_device *spi)
{
- struct atmel_spi *as = spi_master_get_devdata(spi->master);
struct atmel_spi_device *asd = spi->controller_state;
unsigned gpio = (unsigned) spi->controller_data;
- unsigned long flags;
if (!asd)
return;
- spin_lock_irqsave(&as->lock, flags);
- if (as->stay == spi) {
- as->stay = NULL;
- cs_deactivate(as, spi);
- }
- spin_unlock_irqrestore(&as->lock, flags);
-
spi->controller_state = NULL;
gpio_free(gpio);
kfree(asd);
}
+static inline unsigned int atmel_get_version(struct atmel_spi *as)
+{
+ return spi_readl(as, VERSION) & 0x00000fff;
+}
+
+static void atmel_get_caps(struct atmel_spi *as)
+{
+ unsigned int version;
+
+ version = atmel_get_version(as);
+ dev_info(&as->pdev->dev, "version: 0x%x\n", version);
+
+ as->caps.is_spi2 = version > 0x121;
+ as->caps.has_wdrbt = version >= 0x210;
+ as->caps.has_dma_support = version >= 0x212;
+}
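/*
 * Example reading of the capability flags above (VERSION value assumed for
 * illustration): a controller reporting 0x212 is treated as SPI v2 with both
 * WDRBT and dmaengine support, so probe tries to grab DMA channels; a part
 * reporting 0x120 is handled as the original controller and stays on the PDC
 * code path.
 */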
+
/*-------------------------------------------------------------------------*/
-static int __devinit atmel_spi_probe(struct platform_device *pdev)
+static int atmel_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
int irq;
@@ -916,6 +1291,9 @@ static int __devinit atmel_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct atmel_spi *as;
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
@@ -924,23 +1302,24 @@ static int __devinit atmel_spi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- clk = clk_get(&pdev->dev, "spi_clk");
+ clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(clk))
return PTR_ERR(clk);
/* setup spi core then atmel-specific driver state */
ret = -ENOMEM;
- master = spi_alloc_master(&pdev->dev, sizeof *as);
+ master = spi_alloc_master(&pdev->dev, sizeof(*as));
if (!master)
goto out_free;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
- master->num_chipselect = 4;
+ master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
- master->transfer = atmel_spi_transfer;
+ master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
platform_set_drvdata(pdev, master);
@@ -956,124 +1335,176 @@ static int __devinit atmel_spi_probe(struct platform_device *pdev)
goto out_free;
spin_lock_init(&as->lock);
- INIT_LIST_HEAD(&as->queue);
+
as->pdev = pdev;
- as->regs = ioremap(regs->start, resource_size(regs));
- if (!as->regs)
+ as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(as->regs)) {
+ ret = PTR_ERR(as->regs);
goto out_free_buffer;
+ }
+ as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
- ret = request_irq(irq, atmel_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ init_completion(&as->xfer_completion);
+
+ atmel_get_caps(as);
+
+ as->use_dma = false;
+ as->use_pdc = false;
+ if (as->caps.has_dma_support) {
+ if (atmel_spi_configure_dma(as) == 0)
+ as->use_dma = true;
+ } else {
+ as->use_pdc = true;
+ }
+
+ if (as->caps.has_dma_support && !as->use_dma)
+ dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
+
+ if (as->use_pdc) {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ }
if (ret)
goto out_unmap_regs;
/* Initialize the hardware */
- clk_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free_irq;
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
- spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
+ }
+
+ if (as->use_pdc)
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
spi_writel(as, CR, SPI_BIT(SPIEN));
/* go! */
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
- goto out_reset_hw;
+ goto out_free_dma;
return 0;
-out_reset_hw:
+out_free_dma:
+ if (as->use_dma)
+ atmel_spi_release_dma(as);
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
- clk_disable(clk);
- free_irq(irq, master);
+ clk_disable_unprepare(clk);
+out_free_irq:
out_unmap_regs:
- iounmap(as->regs);
out_free_buffer:
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
out_free:
- clk_put(clk);
spi_master_put(master);
return ret;
}
-static int __devexit atmel_spi_remove(struct platform_device *pdev)
+static int atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
- as->stopping = 1;
+ if (as->use_dma) {
+ atmel_spi_stop_dma(as);
+ atmel_spi_release_dma(as);
+ }
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
- /* Terminate remaining queued transfers */
- list_for_each_entry(msg, &as->queue, queue) {
- /* REVISIT unmapping the dma is a NOP on ARM and AVR32
- * but we shouldn't depend on that...
- */
- msg->status = -ESHUTDOWN;
- msg->complete(msg->context);
- }
-
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
- clk_disable(as->clk);
- clk_put(as->clk);
- free_irq(as->irq, master);
- iounmap(as->regs);
-
- spi_unregister_master(master);
+ clk_disable_unprepare(as->clk);
return 0;
}
-#ifdef CONFIG_PM
-
-static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ clk_disable_unprepare(as->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
- clk_disable(as->clk);
return 0;
}
-static int atmel_spi_resume(struct platform_device *pdev)
+static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
- clk_enable(as->clk);
- return 0;
+ pinctrl_pm_select_default_state(dev);
+
+ clk_prepare_enable(as->clk);
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
}
+static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
+
+#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
#else
-#define atmel_spi_suspend NULL
-#define atmel_spi_resume NULL
+#define ATMEL_SPI_PM_OPS NULL
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_spi_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
+#endif
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.owner = THIS_MODULE,
+ .pm = ATMEL_SPI_PM_OPS,
+ .of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
- .suspend = atmel_spi_suspend,
- .resume = atmel_spi_resume,
.probe = atmel_spi_probe,
- .remove = __exit_p(atmel_spi_remove),
+ .remove = atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 4de66d1cfe5..67375a11d4b 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -55,8 +55,6 @@ struct au1550_spi {
volatile psc_spi_t __iomem *regs;
int irq;
- unsigned freq_max;
- unsigned freq_min;
unsigned len;
unsigned tx_count;
@@ -248,16 +246,8 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
hz = t->speed_hz;
}
- if (bpw < 4 || bpw > 24) {
- dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n",
- bpw);
+ if (!hz)
return -EINVAL;
- }
- if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) {
- dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n",
- hz);
- return -EINVAL;
- }
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
@@ -292,29 +282,6 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static int au1550_spi_setup(struct spi_device *spi)
-{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
-
- if (spi->bits_per_word < 4 || spi->bits_per_word > 24) {
- dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n",
- spi->bits_per_word);
- return -EINVAL;
- }
-
- if (spi->max_speed_hz == 0)
- spi->max_speed_hz = hw->freq_max;
- if (spi->max_speed_hz > hw->freq_max
- || spi->max_speed_hz < hw->freq_min)
- return -EINVAL;
- /*
- * NOTE: cannot change speed and other hw settings immediately,
- * otherwise sharing of spi bus is not possible,
- * so do not call setupxfer(spi, NULL) here
- */
- return 0;
-}
-
/*
 * for dma spi transfers, we have to set up the rx channel, otherwise there is
 * no reliable way to recognize that the spi transfer is done
@@ -717,7 +684,7 @@ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
}
}
-static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
+static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
u32 stat, cfg;
@@ -766,7 +733,7 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
}
-static int __init au1550_spi_probe(struct platform_device *pdev)
+static int au1550_spi_probe(struct platform_device *pdev)
{
struct au1550_spi *hw;
struct spi_master *master;
@@ -782,11 +749,12 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
- hw->pdata = pdev->dev.platform_data;
+ hw->master = master;
+ hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
@@ -848,7 +816,6 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
hw->bitbang.master = hw->master;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
- hw->bitbang.master->setup = au1550_spi_setup;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
if (hw->usedma) {
@@ -919,8 +886,9 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
- hw->freq_max = hw->pdata->mainclk_hz / min_div;
- hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
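	/*
	 * Illustration of the range above (mainclk_hz assumed to be 48 MHz):
	 * min_div = (2 << 0) * (2 * (4 + 1)) = 20 and
	 * max_div = (2 << 3) * (2 * (63 + 1)) = 2048, so the master advertises
	 * max_speed_hz = 48 MHz / 20 = 2.4 MHz and a min_speed_hz of roughly
	 * 23.4 kHz.
	 */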
au1550_spi_setup_psc_as_spi(hw);
@@ -968,7 +936,7 @@ err_nomem:
return err;
}
-static int __exit au1550_spi_remove(struct platform_device *pdev)
+static int au1550_spi_remove(struct platform_device *pdev)
{
struct au1550_spi *hw = platform_get_drvdata(pdev);
@@ -987,8 +955,6 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
au1xxx_dbdma_chan_free(hw->dma_tx_ch);
}
- platform_set_drvdata(pdev, NULL);
-
spi_master_put(hw->master);
return 0;
}
@@ -997,7 +963,8 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
- .remove = __exit_p(au1550_spi_remove),
+ .probe = au1550_spi_probe,
+ .remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
.owner = THIS_MODULE,
@@ -1010,13 +977,22 @@ static int __init au1550_spi_init(void)
* create memory device with 8 bits dev_devwidth
* needed for proper byte ordering to spi fifo
*/
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ break;
+ default:
+ return -ENODEV;
+ }
+
if (usedma) {
ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
if (!ddma_memid)
		printk(KERN_ERR "au1550-spi: cannot add memory "
				"dbdma device\n");
}
- return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
+ return platform_driver_register(&au1550_spi_drv);
}
module_init(au1550_spi_init);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
new file mode 100644
index 00000000000..69167456ec1
--- /dev/null
+++ b/drivers/spi/spi-bcm2835.c
@@ -0,0 +1,408 @@
+/*
+ * Driver for Broadcom BCM2835 SPI Controllers
+ *
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ *
+ * This driver is inspired by:
+ * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG 0x02000000
+#define BCM2835_SPI_CS_DMA_LEN 0x01000000
+#define BCM2835_SPI_CS_CSPOL2 0x00800000
+#define BCM2835_SPI_CS_CSPOL1 0x00400000
+#define BCM2835_SPI_CS_CSPOL0 0x00200000
+#define BCM2835_SPI_CS_RXF 0x00100000
+#define BCM2835_SPI_CS_RXR 0x00080000
+#define BCM2835_SPI_CS_TXD 0x00040000
+#define BCM2835_SPI_CS_RXD 0x00020000
+#define BCM2835_SPI_CS_DONE 0x00010000
+#define BCM2835_SPI_CS_LEN 0x00002000
+#define BCM2835_SPI_CS_REN 0x00001000
+#define BCM2835_SPI_CS_ADCS 0x00000800
+#define BCM2835_SPI_CS_INTR 0x00000400
+#define BCM2835_SPI_CS_INTD 0x00000200
+#define BCM2835_SPI_CS_DMAEN 0x00000100
+#define BCM2835_SPI_CS_TA 0x00000080
+#define BCM2835_SPI_CS_CSPOL 0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
+#define BCM2835_SPI_CS_CPOL 0x00000008
+#define BCM2835_SPI_CS_CPHA 0x00000004
+#define BCM2835_SPI_CS_CS_10 0x00000002
+#define BCM2835_SPI_CS_CS_01 0x00000001
+
+#define BCM2835_SPI_TIMEOUT_MS 30000
+#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS)
+
+#define DRV_NAME "spi-bcm2835"
+
+struct bcm2835_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct completion done;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len)
+{
+ u8 byte;
+
+ while (len--) {
+ byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = byte;
+ }
+}
+
+static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len)
+{
+ u8 byte;
+
+ if (len > bs->len)
+ len = bs->len;
+
+ while (len--) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
+ bs->len--;
+ }
+}
+
+static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /*
+ * RXR - RX needs Reading. This means 12 (or more) bytes have been
+ * transmitted and hence 12 (or more) bytes have been received.
+ *
+ * The FIFO is 16-bytes deep. We check for this interrupt to keep the
+ * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check
+ * this before DONE (TX empty) just in case we delayed processing this
+ * interrupt for some reason.
+ *
+ * We only check for this case if we have more bytes to TX; at the end
+ * of the transfer, we ignore this pipelining optimization, and let
+ * bcm2835_spi_finish_transfer() drain the RX FIFO.
+ */
+ if (bs->len && (cs & BCM2835_SPI_CS_RXR)) {
+ /* Read 12 bytes of data */
+ bcm2835_rd_fifo(bs, 12);
+
+ /* Write up to 12 bytes */
+ bcm2835_wr_fifo(bs, 12);
+
+ /*
+ * We must have written something to the TX FIFO due to the
+ * bs->len check above, so cannot be DONE. Hence, return
+ * early. Note that DONE could also be set if we serviced an
+ * RXR interrupt really late.
+ */
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * DONE - TX empty. This occurs when we first enable the transfer
+ * since we do not pre-fill the TX FIFO. At any other time, given that
+ * we refill the TX FIFO above based on RXR, and hence ignore DONE if
+ * RXR is set, DONE really does mean end-of-transfer.
+ */
+ if (cs & BCM2835_SPI_CS_DONE) {
+ if (bs->len) { /* First interrupt in a transfer */
+ bcm2835_wr_fifo(bs, 16);
+ } else { /* Transfer complete */
+ /* Disable SPI interrupts */
+ cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ /*
+ * Wake up bcm2835_spi_transfer_one(), which will call
+ * bcm2835_spi_finish_transfer(), to drain the RX FIFO.
+ */
+ complete(&bs->done);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int bcm2835_spi_start_transfer(struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
+ unsigned long spi_hz, clk_hz, cdiv;
+ u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ cdiv = 2; /* clk_hz/2 is the fastest we can go */
+ } else if (spi_hz) {
+ /* CDIV must be a power of two */
+ cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz));
+
+ if (cdiv >= 65536)
+ cdiv = 0; /* 0 is the slowest we can go */
+ } else
+ cdiv = 0; /* 0 is the slowest we can go */
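	/*
	 * Illustrative numbers (assumed, not from the patch): with
	 * clk_hz = 250 MHz and spi_hz = 10 MHz, DIV_ROUND_UP() gives 25 and
	 * roundup_pow_of_two() raises it to 32, so SCK runs at
	 * 250 MHz / 32 = 7.8125 MHz, the fastest power-of-two-divided rate
	 * that does not exceed the request.
	 */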
+
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (spi->mode & SPI_CS_HIGH) {
+ cs |= BCM2835_SPI_CS_CSPOL;
+ cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
+ }
+
+ cs |= spi->chip_select;
+ }
+
+ reinit_completion(&bs->done);
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->len = tfr->len;
+
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+ /*
+ * Enable the HW block. This will immediately trigger a DONE (TX
+ * empty) interrupt, upon which we will fill the TX FIFO with the
+ * first TX bytes. Pre-filling the TX FIFO here to avoid the
+ * interrupt doesn't work:-(
+ */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ return 0;
+}
+
+static int bcm2835_spi_finish_transfer(struct spi_device *spi,
+ struct spi_transfer *tfr, bool cs_change)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Drain RX FIFO */
+ while (cs & BCM2835_SPI_CS_RXD) {
+ bcm2835_rd_fifo(bs, 1);
+ cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ }
+
+ if (tfr->delay_usecs)
+ udelay(tfr->delay_usecs);
+
+ if (cs_change)
+ /* Clear TA flag */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA);
+
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_master *master,
+ struct spi_message *mesg)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *tfr;
+ struct spi_device *spi = mesg->spi;
+ int err = 0;
+ unsigned int timeout;
+ bool cs_change;
+
+ list_for_each_entry(tfr, &mesg->transfers, transfer_list) {
+ err = bcm2835_spi_start_transfer(spi, tfr);
+ if (err)
+ goto out;
+
+ timeout = wait_for_completion_timeout(&bs->done,
+ msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS));
+ if (!timeout) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ cs_change = tfr->cs_change ||
+ list_is_last(&tfr->transfer_list, &mesg->transfers);
+
+ err = bcm2835_spi_finish_transfer(spi, tfr, cs_change);
+ if (err)
+ goto out;
+
+ mesg->actual_length += (tfr->len - bs->len);
+ }
+
+out:
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+ mesg->status = err;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835_spi *bs;
+ struct resource *res;
+ int err;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ master->mode_bits = BCM2835_SPI_MODE_BITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->num_chipselect = 3;
+ master->transfer_one_message = bcm2835_spi_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_master_get_devdata(master);
+
+ init_completion(&bs->done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bs->regs)) {
+ err = PTR_ERR(bs->regs);
+ goto out_master_put;
+ }
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk)) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ goto out_master_put;
+ }
+
+ bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (bs->irq <= 0) {
+ dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
+ err = bs->irq ? bs->irq : -ENODEV;
+ goto out_master_put;
+ }
+
+ clk_prepare_enable(bs->clk);
+
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ /* initialise the hardware */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+out_master_put:
+ spi_master_put(master);
+ return err;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+ { .compatible = "brcm,bcm2835-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_spi_match,
+ },
+ .probe = bcm2835_spi_probe,
+ .remove = bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
+MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644
index 00000000000..86f5a98aa7a
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -0,0 +1,474 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs;
+ u8 __iomem *fifo;
+
+ u32 speed_hz;
+ u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned cs,
+ bool active)
+{
+ u32 reg;
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ reg &= ~BIT(cs);
+ if (active == !(bs->cs_polarity & BIT(cs)))
+ reg |= BIT(cs);
+
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
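
The polarity handling in bcm63xx_hsspi_set_cs() is easy to misread: the per-chip-select bit written to HSSPI_GLOBAL_CTRL_REG ends up being the XOR of "assert the chip select" and "the device uses SPI_CS_HIGH". A tiny standalone C check (not driver code) of all four combinations, mirroring the expression used above:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/*
	 * cs_hi:  the device's bit is set in bs->cs_polarity (SPI_CS_HIGH).
	 * active: the driver wants the chip select asserted.
	 * bit:    value of the per-CS bit in HSSPI_GLOBAL_CTRL_REG, computed
	 *         with the same expression as bcm63xx_hsspi_set_cs().
	 */
	for (int cs_hi = 0; cs_hi <= 1; cs_hi++) {
		for (int active = 0; active <= 1; active++) {
			bool bit = (active != 0) == !cs_hi;

			/* equivalent to: bit = active ^ cs_hi */
			printf("cs_hi=%d active=%d -> ctrl bit=%d\n",
			       cs_hi, active, bit);
		}
	}
	return 0;
}
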
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
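
For a feel of the double rounding in bcm63xx_hsspi_set_clk(), a standalone C sketch (not driver code); the 400 MHz HSSPI reference clock is an assumption for illustration only. A 20 MHz request gives an integer divider of 20 and a frequency-control field of 103, and since 20 MHz does not exceed HSSPI_MAX_SYNC_CLOCK the synchronous input path is kept.

#include <stdio.h>

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;	/* stand-in for the kernel's DIV_ROUND_UP() */
}

int main(void)
{
	unsigned long pll_hz = 400000000;	/* assumed bs->speed_hz, illustration only */
	unsigned long hz = 20000000;		/* requested transfer speed */
	unsigned long div = div_round_up(pll_hz, hz);
	unsigned long field = div_round_up(2048, div);

	/* prints: divider=20 freq-ctrl field=103 */
	printf("divider=%lu freq-ctrl field=%lu\n", div, field);
	return 0;
}
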
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned chip_select = spi->chip_select;
+ u16 opcode = 0;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
+
+ __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ __raw_writew(opcode | curr_step, bs->fifo);
+
+ /* enable interrupt */
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW,
+ bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+
+ pending -= curr_step;
+ }
+
+ return 0;
+}
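
The loop in bcm63xx_hsspi_do_txrx() moves data one FIFO-load at a time, and because the two opcode bytes share the 512-byte FIFO for anything but a pure read, the usable payload per step drops to 510 bytes. A standalone C sketch (not driver code) of how an example 1300-byte write would be split:

#include <stdio.h>

#define HSSPI_BUFFER_LEN	512
#define HSSPI_OPCODE_LEN	2

int main(void)
{
	int pending = 1300;	/* example write length, illustration only */
	int step_size = HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN;	/* not a pure read */
	int chunk = 0;

	/* prints chunks of 510, 510 and 280 bytes */
	while (pending > 0) {
		int curr_step = pending < step_size ? pending : step_size;

		printf("chunk %d: %d bytes\n", ++chunk, curr_step);
		pending -= curr_step;
	}
	return 0;
}
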
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ /* only change actual polarities if there is no transfer */
+ if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ int dummy_cs;
+ u32 reg;
+
+ /* This controller does not support keeping CS active during idle.
+ * To work around this, we use the following ugly hack:
+ *
+ * a. Invert the target chip select's polarity so it will be active.
+ * b. Select a "dummy" chip select to use as the hardware target.
+	 * c. Invert the dummy chip select's polarity so it will be inactive
+	 *    during the actual transfers.
+	 * d. Tell the hardware to send to the dummy chip select. Because the
+	 *    bus lines are shared and the real target's chip select was made
+	 *    active in step a, the target still receives the transfer and we
+	 *    see its response.
+	 * e. At the end, restore the polarities to their default values.
+ */
+
+ dummy_cs = !spi->chip_select;
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcm63xx_hsspi_do_txrx(spi, t);
+ if (status)
+ break;
+
+ msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ }
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+ reg |= bs->cs_polarity;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm63xx_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int irq, ret;
+ u32 reg, rate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ return -ENXIO;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ clk = devm_clk_get(dev, "hsspi");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->regs = regs;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+ mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
+
+ master->bus_num = HSSPI_BUS_NUM;
+ master->num_chipselect = 8;
+ master->setup = bcm63xx_hsspi_setup;
+ master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+ if (ret)
+ goto out_put_master;
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_put_master;
+
+ return 0;
+
+out_put_master:
+ spi_master_put(master);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+ .driver = {
+ .name = "bcm63xx-hsspi",
+ .owner = THIS_MODULE,
+ .pm = &bcm63xx_hsspi_pm_ops,
+ },
+ .probe = bcm63xx_hsspi_probe,
+ .remove = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index a9f4049c676..8510400e786 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -30,13 +29,11 @@
#include <linux/spi/spi.h>
#include <linux/completion.h>
#include <linux/err.h>
-#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <bcm63xx_dev_spi.h>
-#define PFX KBUILD_MODNAME
-#define DRV_VER "0.1.2"
+#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
struct completion done;
@@ -45,21 +42,14 @@ struct bcm63xx_spi {
int irq;
/* Platform data */
- u32 speed_hz;
unsigned fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
- /* Data buffers */
- const unsigned char *tx_ptr;
- unsigned char *rx_ptr;
-
/* data iomem */
u8 __iomem *tx_io;
const u8 __iomem *rx_io;
- int remaining_bytes;
-
struct clk *clk;
struct platform_device *pdev;
};
@@ -98,40 +88,16 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
{ 391000, SPI_CLK_0_391MHZ }
};
-static int bcm63xx_spi_check_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- u8 bits_per_word;
-
- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_err(&spi->dev, "%s, unsupported slave %d\n",
- __func__, spi->chip_select);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
- u32 hz;
u8 clk_cfg, reg;
int i;
- hz = (t) ? t->speed_hz : spi->max_speed_hz;
-
/* Find the closest clock configuration */
for (i = 0; i < SPI_CLK_MASK; i++) {
- if (hz >= bcm63xx_spi_freq_table[i][0]) {
+ if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
clk_cfg = bcm63xx_spi_freq_table[i][1];
break;
}
@@ -148,59 +114,23 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
- clk_cfg, hz);
+ clk_cfg, t->speed_hz);
}
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA)
-static int bcm63xx_spi_setup(struct spi_device *spi)
-{
- struct bcm63xx_spi *bs;
- int ret;
-
- bs = spi_master_get_devdata(spi->master);
-
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
- if (spi->mode & ~MODEBITS) {
- dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
- __func__, spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
- ret = bcm63xx_spi_check_transfer(spi, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return ret;
- }
-
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
-
- return 0;
-}
-
-/* Fill the TX FIFO with as many bytes as possible */
-static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
-{
- u8 size;
-
- /* Fill the Tx FIFO with as many bytes as possible */
- size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
- bs->fifo_size;
- memcpy_toio(bs->tx_io, bs->tx_ptr, size);
- bs->remaining_bytes -= size;
-}
-
-static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
- struct spi_transfer *t)
+static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
+ unsigned int num_transfers)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
+ u8 rx_tail;
+ unsigned int i, timeout = 0, prepend_len = 0, len = 0;
+ struct spi_transfer *t = first;
+ bool do_rx = false;
+ bool do_tx = false;
/* Disable the CMD_DONE interrupt */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -208,25 +138,43 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
- /* Transmitter is inhibited */
- bs->tx_ptr = t->tx_buf;
- bs->rx_ptr = t->rx_buf;
+ if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
+ prepend_len = t->len;
+
+ /* prepare the buffer */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->tx_buf) {
+ do_tx = true;
+ memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
+
+ /* don't prepend more than one tx */
+ if (t != first)
+ prepend_len = 0;
+ }
+
+ if (t->rx_buf) {
+ do_rx = true;
+ /* prepend is half-duplex write only */
+ if (t == first)
+ prepend_len = 0;
+ }
+
+ len += t->len;
- if (t->tx_buf) {
- bs->remaining_bytes = t->len;
- bcm63xx_spi_fill_tx_fifo(bs);
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
}
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
/* Fill in the Message control register */
- msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
+ msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
- if (t->rx_buf && t->tx_buf)
+ if (do_rx && do_tx && prepend_len == 0)
msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
- else if (t->rx_buf)
+ else if (do_rx)
msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
- else if (t->tx_buf)
+ else if (do_tx)
msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
switch (bs->msg_ctl_width) {
@@ -240,30 +188,33 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
/* Issue the transfer */
cmd = SPI_CMD_START_IMMEDIATE;
- cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
bcm_spi_writew(bs, cmd, SPI_CMD);
/* Enable the CMD_DONE interrupt */
bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
- return t->len - bs->remaining_bytes;
-}
+ timeout = wait_for_completion_timeout(&bs->done, HZ);
+ if (!timeout)
+ return -ETIMEDOUT;
-static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
-{
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ if (!do_rx)
+ return 0;
- pm_runtime_get_sync(&bs->pdev->dev);
+ len = 0;
+ t = first;
+ /* Read out all the data */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->rx_buf)
+ memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
- return 0;
-}
-
-static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
-{
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ if (t != first || prepend_len == 0)
+ len += t->len;
- pm_runtime_put(&bs->pdev->dev);
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
return 0;
}
@@ -272,41 +223,72 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
struct spi_message *m)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- struct spi_transfer *t;
+ struct spi_transfer *t, *first = NULL;
struct spi_device *spi = m->spi;
int status = 0;
- unsigned int timeout = 0;
-
+ unsigned int n_transfers = 0, total_len = 0;
+ bool can_use_prepend = false;
+
+ /*
+ * This SPI controller does not support keeping CS active after a
+ * transfer.
+	 * Work around this by merging as many transfers as we can into one
+	 * big full-duplex transfer.
+ */
list_for_each_entry(t, &m->transfers, transfer_list) {
- unsigned int len = t->len;
- u8 rx_tail;
+ if (!first)
+ first = t;
+
+ n_transfers++;
+ total_len += t->len;
+
+ if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
+ first->len <= BCM63XX_SPI_MAX_PREPEND)
+ can_use_prepend = true;
+ else if (can_use_prepend && t->tx_buf)
+ can_use_prepend = false;
+
+ /* we can only transfer one fifo worth of data */
+ if ((can_use_prepend &&
+ total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
+ (!can_use_prepend && total_len > bs->fifo_size)) {
+ dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
+ total_len, bs->fifo_size);
+ status = -EINVAL;
+ goto exit;
+ }
- status = bcm63xx_spi_check_transfer(spi, t);
- if (status < 0)
+ /* all combined transfers have to have the same speed */
+ if (t->speed_hz != first->speed_hz) {
+ dev_err(&spi->dev, "unable to change speed between transfers\n");
+ status = -EINVAL;
goto exit;
+ }
- /* configure adapter for a new transfer */
- bcm63xx_spi_setup_transfer(spi, t);
+ /* CS will be deasserted directly after transfer */
+ if (t->delay_usecs) {
+ dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
+ status = -EINVAL;
+ goto exit;
+ }
- while (len) {
- /* send the data */
- len -= bcm63xx_txrx_bufs(spi, t);
+ if (t->cs_change ||
+ list_is_last(&t->transfer_list, &m->transfers)) {
+ /* configure adapter for a new transfer */
+ bcm63xx_spi_setup_transfer(spi, first);
- timeout = wait_for_completion_timeout(&bs->done, HZ);
- if (!timeout) {
- status = -ETIMEDOUT;
+ /* send the data */
+ status = bcm63xx_txrx_bufs(spi, first, n_transfers);
+ if (status)
goto exit;
- }
- /* read out all data */
- rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+ m->actual_length += total_len;
- /* Read out all the data */
- if (rx_tail)
- memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
+ first = NULL;
+ n_transfers = 0;
+ total_len = 0;
+ can_use_prepend = false;
}
-
- m->actual_length += t->len;
}
exit:
m->status = status;
@@ -337,62 +319,45 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
}
-static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
+static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct device *dev = &pdev->dev;
- struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data;
+ struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq;
struct spi_master *master;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(dev, "no iomem\n");
- ret = -ENXIO;
- goto out;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
- clk = clk_get(dev, "spi");
+ clk = devm_clk_get(dev, "spi");
if (IS_ERR(clk)) {
dev_err(dev, "no clock for device\n");
- ret = PTR_ERR(clk);
- goto out;
+ return PTR_ERR(clk);
}
master = spi_alloc_master(dev, sizeof(*bs));
if (!master) {
dev_err(dev, "out of memory\n");
- ret = -ENOMEM;
- goto out_clk;
+ return -ENOMEM;
}
bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), PFX)) {
- dev_err(dev, "iomem request failed\n");
- ret = -ENXIO;
- goto out_err;
- }
-
- bs->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!bs->regs) {
- dev_err(dev, "unable to ioremap regs\n");
- ret = -ENOMEM;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(bs->regs)) {
+ ret = PTR_ERR(bs->regs);
goto out_err;
}
@@ -409,12 +374,10 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
- master->setup = bcm63xx_spi_setup;
- master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
- master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
- bs->speed_hz = pdata->speed_hz;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
bs->msg_type_shift = pdata->msg_type_shift;
bs->msg_ctl_width = pdata->msg_ctl_width;
bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
@@ -427,98 +390,90 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
default:
dev_err(dev, "unsupported MSG_CTL width: %d\n",
bs->msg_ctl_width);
- goto out_clk_disable;
+ goto out_err;
}
/* Initialize hardware */
- clk_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ goto out_err;
+
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(dev, "spi register failed\n");
goto out_clk_disable;
}
- dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n",
- r->start, irq, bs->fifo_size, DRV_VER);
+ dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n",
+ r->start, irq, bs->fifo_size);
return 0;
out_clk_disable:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
out_err:
- platform_set_drvdata(pdev, NULL);
spi_master_put(master);
-out_clk:
- clk_put(clk);
-out:
return ret;
}
-static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
+static int bcm63xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- spi_unregister_master(master);
-
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
/* HW shutdown */
- clk_disable(bs->clk);
- clk_put(bs->clk);
-
- platform_set_drvdata(pdev, 0);
-
- spi_master_put(master);
+ clk_disable_unprepare(bs->clk);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bcm63xx_spi_suspend(struct device *dev)
{
- struct spi_master *master =
- platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- clk_disable(bs->clk);
+ spi_master_suspend(master);
+
+ clk_disable_unprepare(bs->clk);
return 0;
}
static int bcm63xx_spi_resume(struct device *dev)
{
- struct spi_master *master =
- platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ int ret;
- clk_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
return 0;
}
+#endif
static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
- .suspend = bcm63xx_spi_suspend,
- .resume = bcm63xx_spi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
};
-#define BCM63XX_SPI_PM_OPS (&bcm63xx_spi_pm_ops)
-#else
-#define BCM63XX_SPI_PM_OPS NULL
-#endif
-
static struct platform_driver bcm63xx_spi_driver = {
.driver = {
.name = "bcm63xx-spi",
.owner = THIS_MODULE,
- .pm = BCM63XX_SPI_PM_OPS,
+ .pm = &bcm63xx_spi_pm_ops,
},
.probe = bcm63xx_spi_probe,
- .remove = __devexit_p(bcm63xx_spi_remove),
+ .remove = bcm63xx_spi_remove,
};
module_platform_driver(bcm63xx_spi_driver);
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 6555ecd0730..f515c5e9db5 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -8,7 +8,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -416,9 +415,8 @@ bfin_sport_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? :
- message->spi->bits_per_word ? : 8;
- if (bits_per_word % 16 == 0)
+ bits_per_word = transfer->bits_per_word;
+ if (bits_per_word == 16)
drv_data->ops = &bfin_sport_transfer_ops_u16;
else
drv_data->ops = &bfin_sport_transfer_ops_u8;
@@ -593,7 +591,7 @@ bfin_sport_spi_setup(struct spi_device *spi)
*/
if (chip_info->ctl_reg || chip_info->enable_dma) {
ret = -EINVAL;
- dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
goto error;
}
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
@@ -601,13 +599,6 @@ bfin_sport_spi_setup(struct spi_device *spi)
}
}
- if (spi->bits_per_word % 8) {
- dev_err(&spi->dev, "%d bits_per_word is not supported\n",
- spi->bits_per_word);
- ret = -EINVAL;
- goto error;
- }
-
/* translate common spi framework into our register
* following configure contents are same for tx and rx.
*/
@@ -755,8 +746,7 @@ bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
return 0;
}
-static int __devinit
-bfin_sport_spi_probe(struct platform_device *pdev)
+static int bfin_sport_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
@@ -765,7 +755,7 @@ bfin_sport_spi_probe(struct platform_device *pdev)
struct bfin_sport_spi_master_data *drv_data;
int status;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
/* Allocate master with space for drv_data */
master = spi_alloc_master(dev, sizeof(*master) + 16);
@@ -780,6 +770,7 @@ bfin_sport_spi_probe(struct platform_device *pdev)
drv_data->pin_req = platform_info->pin_req;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
master->cleanup = bfin_sport_spi_cleanup;
@@ -863,8 +854,7 @@ bfin_sport_spi_probe(struct platform_device *pdev)
}
/* stop hardware and remove the driver */
-static int __devexit
-bfin_sport_spi_remove(struct platform_device *pdev)
+static int bfin_sport_spi_remove(struct platform_device *pdev)
{
struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -885,17 +875,13 @@ bfin_sport_spi_remove(struct platform_device *pdev)
peripheral_free_list(drv_data->pin_req);
- /* Prevent double remove */
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
-#ifdef CONFIG_PM
-static int
-bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_sport_spi_suspend(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
status = bfin_sport_spi_stop_queue(drv_data);
@@ -908,10 +894,9 @@ bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
return status;
}
-static int
-bfin_sport_spi_resume(struct platform_device *pdev)
+static int bfin_sport_spi_resume(struct device *dev)
{
- struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
int status;
/* Enable the SPI interface */
@@ -924,19 +909,22 @@ bfin_sport_spi_resume(struct platform_device *pdev)
return status;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
+ bfin_sport_spi_resume);
+
+#define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops)
#else
-# define bfin_sport_spi_suspend NULL
-# define bfin_sport_spi_resume NULL
+#define BFIN_SPORT_SPI_PM_OPS NULL
#endif
static struct platform_driver bfin_sport_spi_driver = {
.driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = BFIN_SPORT_SPI_PM_OPS,
},
.probe = bfin_sport_spi_probe,
- .remove = __devexit_p(bfin_sport_spi_remove),
- .suspend = bfin_sport_spi_suspend,
- .resume = bfin_sport_spi_resume,
+ .remove = bfin_sport_spi_remove,
};
module_platform_driver(bfin_sport_spi_driver);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 9bb4d4af854..ebf720b88a2 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -350,7 +351,6 @@ static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
{
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
- struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
@@ -362,9 +362,6 @@ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer, transfer_list);
-
msg->state = NULL;
if (!drv_data->cs_change)
@@ -524,7 +521,7 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
timeout = jiffies + HZ;
while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
- dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
+ dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n");
break;
} else
cpu_relax();
@@ -642,23 +639,17 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? :
- message->spi->bits_per_word ? : 8;
- if (bits_per_word % 16 == 0) {
+ bits_per_word = transfer->bits_per_word;
+ if (bits_per_word == 16) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
- } else if (bits_per_word % 8 == 0) {
+ } else if (bits_per_word == 8) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = transfer->len;
cr_width = 0;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
- } else {
- dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n");
- message->status = -EINVAL;
- bfin_spi_giveback(drv_data);
- return;
}
cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
cr |= cr_width;
@@ -809,13 +800,13 @@ static void bfin_spi_pump_transfers(unsigned long data)
bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
else {
int loop;
- if (bits_per_word % 16 == 0) {
+ if (bits_per_word == 16) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 16;
loop++) {
bfin_write(&drv_data->regs->tdbr, *buf++);
}
- } else if (bits_per_word % 8 == 0) {
+ } else if (bits_per_word == 8) {
u8 *buf = (u8 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 8; loop++)
bfin_write(&drv_data->regs->tdbr, *buf++);
@@ -919,8 +910,9 @@ static void bfin_spi_pump_messages(struct work_struct *work)
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
- "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
+ dev_dbg(&drv_data->pdev->dev,
+ "got a message to pump, state is set to: baud "
+ "%d, flag 0x%x, ctl 0x%x\n",
drv_data->cur_chip->baud, drv_data->cur_chip->flag,
drv_data->cur_chip->ctl_reg);
@@ -1019,8 +1011,8 @@ static int bfin_spi_setup(struct spi_device *spi)
* but let's assume (for now) they do.
*/
if (chip_info->ctl_reg & ~bfin_ctl_reg) {
- dev_err(&spi->dev, "do not set bits in ctl_reg "
- "that the SPI framework manages\n");
+ dev_err(&spi->dev,
+ "do not set bits in ctl_reg that the SPI framework manages\n");
goto error;
}
chip->enable_dma = chip_info->enable_dma != 0
@@ -1034,17 +1026,7 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->ctl_reg &= bfin_ctl_reg;
}
- if (spi->bits_per_word % 8) {
- dev_err(&spi->dev, "%d bits_per_word is not supported\n",
- spi->bits_per_word);
- goto error;
- }
-
/* translate common spi framework into our register */
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
- dev_err(&spi->dev, "unsupported spi modes detected\n");
- goto error;
- }
if (spi->mode & SPI_CPOL)
chip->ctl_reg |= BIT_CTL_CPOL;
if (spi->mode & SPI_CPHA)
@@ -1062,17 +1044,17 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->chip_select_num = spi->chip_select;
if (chip->chip_select_num < MAX_CTRL_CS) {
if (!(spi->mode & SPI_CPHA))
- dev_warn(&spi->dev, "Warning: SPI CPHA not set:"
- " Slave Select not under software control!\n"
- " See Documentation/blackfin/bfin-spi-notes.txt");
+ dev_warn(&spi->dev,
+ "Warning: SPI CPHA not set: Slave Select not under software control!\n"
+ "See Documentation/blackfin/bfin-spi-notes.txt\n");
chip->flag = (1 << spi->chip_select) << 8;
} else
chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
if (chip->enable_dma && chip->pio_interrupt) {
- dev_err(&spi->dev, "enable_dma is set, "
- "do not set pio_interrupt\n");
+ dev_err(&spi->dev,
+ "enable_dma is set, do not set pio_interrupt\n");
goto error;
}
/*
@@ -1274,7 +1256,7 @@ static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static int __init bfin_spi_probe(struct platform_device *pdev)
+static int bfin_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
@@ -1283,7 +1265,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
struct resource *res;
int status = 0;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
/* Allocate master with space for drv_data */
master = spi_alloc_master(dev, sizeof(*drv_data));
@@ -1300,7 +1282,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
/* the spi->mode bits supported by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
-
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
master->cleanup = bfin_spi_cleanup;
@@ -1387,7 +1369,7 @@ out_error_get_res:
}
/* stop hardware and remove the driver */
-static int __devexit bfin_spi_remove(struct platform_device *pdev)
+static int bfin_spi_remove(struct platform_device *pdev)
{
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -1419,16 +1401,13 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev)
peripheral_free_list(drv_data->pin_req);
- /* Prevent double remove */
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
-#ifdef CONFIG_PM
-static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int bfin_spi_suspend(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
status = bfin_spi_stop_queue(drv_data);
@@ -1447,9 +1426,9 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int bfin_spi_resume(struct platform_device *pdev)
+static int bfin_spi_resume(struct device *dev)
{
- struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
int status = 0;
bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
@@ -1458,31 +1437,34 @@ static int bfin_spi_resume(struct platform_device *pdev)
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+ dev_err(dev, "problem starting queue (%d)\n", status);
return status;
}
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume);
+
+#define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops)
#else
-#define bfin_spi_suspend NULL
-#define bfin_spi_resume NULL
-#endif /* CONFIG_PM */
+#define BFIN_SPI_PM_OPS NULL
+#endif
MODULE_ALIAS("platform:bfin-spi");
static struct platform_driver bfin_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = BFIN_SPI_PM_OPS,
},
- .suspend = bfin_spi_suspend,
- .resume = bfin_spi_resume,
- .remove = __devexit_p(bfin_spi_remove),
+ .probe = bfin_spi_probe,
+ .remove = bfin_spi_remove,
};
static int __init bfin_spi_init(void)
{
- return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
+ return platform_driver_register(&bfin_spi_driver);
}
subsys_initcall(bfin_spi_init);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3e..c616e41521b 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -38,7 +38,7 @@
*
* Since this is software, the timings may not be exactly what your board's
* chips need ... there may be several reasons you'd need to tweak timings
- * in these routines, not just make to make it faster or slower to match a
+ * in these routines, not just to make it faster or slower to match a
* particular CPU clock rate.
*/
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index aef59b1a15f..dc7d2c2d643 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -40,7 +39,7 @@
* to glue code. These bitbang setup() and cleanup() routines are always
* used, though maybe they're called from controller-aware code.
*
- * chipselect() and friends may use use spi_device->controller_data and
+ * chipselect() and friends may use spi_device->controller_data and
* controller registers as appropriate.
*
*
@@ -69,7 +68,7 @@ static unsigned bitbang_txrx_8(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
@@ -95,7 +94,7 @@ static unsigned bitbang_txrx_16(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
@@ -121,7 +120,7 @@ static unsigned bitbang_txrx_32(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
@@ -191,7 +190,7 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -255,155 +254,141 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
* Drivers can provide word-at-a-time i/o primitives, or provide
* transfer-at-a-time ones to leverage dma or fifo hardware.
*/
-static void bitbang_work(struct work_struct *work)
+
+static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang =
- container_of(work, struct spi_bitbang, work);
+ struct spi_bitbang *bitbang;
unsigned long flags;
+ bitbang = spi_master_get_devdata(spi);
+
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
- while (!list_empty(&bitbang->queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- unsigned nsecs;
- struct spi_transfer *t = NULL;
- unsigned tmp;
- unsigned cs_change;
- int status;
- int do_setup = -1;
-
- m = container_of(bitbang->queue.next, struct spi_message,
- queue);
- list_del_init(&m->queue);
- spin_unlock_irqrestore(&bitbang->lock, flags);
-
- /* FIXME this is made-up ... the correct value is known to
- * word-at-a-time bitbang code, and presumably chipselect()
- * should enforce these requirements too?
- */
- nsecs = 100;
+ spin_unlock_irqrestore(&bitbang->lock, flags);
- spi = m->spi;
- tmp = 0;
- cs_change = 1;
- status = 0;
+ return 0;
+}
- list_for_each_entry (t, &m->transfers, transfer_list) {
-
- /* override speed or wordsize? */
- if (t->speed_hz || t->bits_per_word)
- do_setup = 1;
-
- /* init (-1) or override (1) transfer params */
- if (do_setup != 0) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
- if (do_setup == -1)
- do_setup = 0;
- }
-
- /* set up default clock polarity, and activate chip;
- * this implicitly updates clock and spi modes as
- * previously recorded for this device via setup().
- * (and also deselects any other chip that might be
- * selected ...)
- */
- if (cs_change) {
- bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (!t->tx_buf && !t->rx_buf && t->len) {
- status = -EINVAL;
- break;
- }
+static int spi_bitbang_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_bitbang *bitbang;
+ unsigned nsecs;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+ int do_setup = -1;
+ struct spi_device *spi = m->spi;
+
+ bitbang = spi_master_get_devdata(master);
+
+ /* FIXME this is made-up ... the correct value is known to
+ * word-at-a-time bitbang code, and presumably chipselect()
+ * should enforce these requirements too?
+ */
+ nsecs = 100;
- /* transfer data. the lower level code handles any
- * new dma mappings it needs. our caller always gave
- * us dma-safe buffers.
- */
- if (t->len) {
- /* REVISIT dma API still needs a designated
- * DMA_ADDR_INVALID; ~0 might be better.
- */
- if (!m->is_dma_mapped)
- t->rx_dma = t->tx_dma = 0;
- status = bitbang->txrx_bufs(spi, t);
- }
- if (status > 0)
- m->actual_length += status;
- if (status != t->len) {
- /* always report some kind of error */
- if (status >= 0)
- status = -EREMOTEIO;
- break;
- }
- status = 0;
+ cs_change = 1;
+ status = 0;
- /* protocol tweaks before next transfer */
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
- if (!cs_change)
- continue;
- if (t->transfer_list.next == &m->transfers)
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
+ status = bitbang->setup_transfer(spi, t);
+ if (status < 0)
break;
+ if (do_setup == -1)
+ do_setup = 0;
+ }
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ /* set up default clock polarity, and activate chip;
+ * this implicitly updates clock and spi modes as
+ * previously recorded for this device via setup().
+ * (and also deselects any other chip that might be
+ * selected ...)
+ */
+ if (cs_change) {
+ bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
ndelay(nsecs);
}
+ cs_change = t->cs_change;
+ if (!t->tx_buf && !t->rx_buf && t->len) {
+ status = -EINVAL;
+ break;
+ }
- m->status = status;
- m->complete(m->context);
-
- /* normally deactivate chipselect ... unless no error and
- * cs_change has hinted that the next message will probably
- * be for this chip too.
+ /* transfer data. the lower level code handles any
+ * new dma mappings it needs. our caller always gave
+ * us dma-safe buffers.
*/
- if (!(status == 0 && cs_change)) {
+ if (t->len) {
+ /* REVISIT dma API still needs a designated
+ * DMA_ADDR_INVALID; ~0 might be better.
+ */
+ if (!m->is_dma_mapped)
+ t->rx_dma = t->tx_dma = 0;
+ status = bitbang->txrx_bufs(spi, t);
+ }
+ if (status > 0)
+ m->actual_length += status;
+ if (status != t->len) {
+ /* always report some kind of error */
+ if (status >= 0)
+ status = -EREMOTEIO;
+ break;
+ }
+ status = 0;
+
+ /* protocol tweaks before next transfer */
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change &&
+ !list_is_last(&t->transfer_list, &m->transfers)) {
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
ndelay(nsecs);
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
+ }
+
+ m->status = status;
- spin_lock_irqsave(&bitbang->lock, flags);
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change)) {
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
}
- bitbang->busy = 0;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+
+ spi_finalize_current_message(master);
+
+ return status;
}
-/**
- * spi_bitbang_transfer - default submit to transfer queue
- */
-int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
+static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
unsigned long flags;
- int status = 0;
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- bitbang = spi_master_get_devdata(spi->master);
+ bitbang = spi_master_get_devdata(spi);
spin_lock_irqsave(&bitbang->lock, flags);
- if (!spi->max_speed_hz)
- status = -ENETDOWN;
- else {
- list_add_tail(&m->queue, &bitbang->queue);
- queue_work(bitbang->workqueue, &bitbang->work);
- }
+ bitbang->busy = 0;
spin_unlock_irqrestore(&bitbang->lock, flags);
- return status;
+ return 0;
}
-EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
/*----------------------------------------------------------------------*/
@@ -429,76 +414,61 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
* This routine registers the spi_master, which will process requests in a
* dedicated task, keeping IRQs unblocked most of the time. To stop
* processing those requests, call spi_bitbang_stop().
+ *
+ * On success, this routine will take a reference to master. The caller is
+ * responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_master_put(), as the counterpart of spi_alloc_master(), to prevent a
+ * memory leak.
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
- int status;
+ struct spi_master *master = bitbang->master;
+ int ret;
- if (!bitbang->master || !bitbang->chipselect)
+ if (!master || !bitbang->chipselect)
return -EINVAL;
- INIT_WORK(&bitbang->work, bitbang_work);
spin_lock_init(&bitbang->lock);
- INIT_LIST_HEAD(&bitbang->queue);
- if (!bitbang->master->mode_bits)
- bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one_message = spi_bitbang_transfer_one;
- if (!bitbang->master->transfer)
- bitbang->master->transfer = spi_bitbang_transfer;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
- if (!bitbang->master->setup) {
+ if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
- bitbang->master->setup = spi_bitbang_setup;
- bitbang->master->cleanup = spi_bitbang_cleanup;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
}
- } else if (!bitbang->master->setup)
- return -EINVAL;
- if (bitbang->master->transfer == spi_bitbang_transfer &&
- !bitbang->setup_transfer)
- return -EINVAL;
-
- /* this task is the only thing to touch the SPI bits */
- bitbang->busy = 0;
- bitbang->workqueue = create_singlethread_workqueue(
- dev_name(bitbang->master->dev.parent));
- if (bitbang->workqueue == NULL) {
- status = -EBUSY;
- goto err1;
}
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- status = spi_register_master(bitbang->master);
- if (status < 0)
- goto err2;
+ ret = spi_register_master(spi_master_get(master));
+ if (ret)
+ spi_master_put(master);
- return status;
-
-err2:
- destroy_workqueue(bitbang->workqueue);
-err1:
- return status;
+ return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
/**
* spi_bitbang_stop - stops the task providing spi communication
*/
-int spi_bitbang_stop(struct spi_bitbang *bitbang)
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
-
- WARN_ON(!list_empty(&bitbang->queue));
-
- destroy_workqueue(bitbang->workqueue);
-
- return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
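
Given the reference-counting note added above, a hypothetical minimal glue driver would now pair spi_bitbang_start() with spi_bitbang_stop() plus spi_master_put(). The sketch below is illustrative only: the foo_* names, the platform-device wiring and the omitted word/buffer callbacks are assumptions, not code from this tree, and driver registration plus MODULE_* boilerplate is left out.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

struct foo_spi {
	struct spi_bitbang	bitbang;
};

static void foo_chipselect(struct spi_device *spi, int is_active)
{
	/* drive the chip-select line for spi->chip_select here (omitted) */
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct foo_spi *fs;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*fs));
	if (!master)
		return -ENOMEM;

	fs = spi_master_get_devdata(master);
	fs->bitbang.master = master;	/* plain pointer; no extra spi_master_get() */
	fs->bitbang.chipselect = foo_chipselect;
	/* fill in txrx_word[]/txrx_bufs before starting (omitted) */

	platform_set_drvdata(pdev, fs);

	ret = spi_bitbang_start(&fs->bitbang);
	if (ret)
		spi_master_put(master);	/* start failed: drop our own reference */
	return ret;
}

static int foo_spi_remove(struct platform_device *pdev)
{
	struct foo_spi *fs = platform_get_drvdata(pdev);

	spi_bitbang_stop(&fs->bitbang);		/* unregisters the master */
	spi_master_put(fs->bitbang.master);	/* counterpart of spi_alloc_master() */
	return 0;
}
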
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 5ed08e53743..ee4f91ccd8f 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -147,8 +147,8 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
/* we only needed to implement one mode here, and choose SPI_MODE_0 */
-#define spidelay(X) do{}while(0)
-//#define spidelay ndelay
+#define spidelay(X) do { } while (0)
+/* #define spidelay ndelay */
#include "spi-bitbang-txrx.h"
@@ -171,15 +171,15 @@ static struct mtd_partition partitions[] = { {
/* sector 0 = 8 pages * 264 bytes/page (1 block)
* sector 1 = 248 pages * 264 bytes/page
*/
- .name = "bookkeeping", // 66 KB
+ .name = "bookkeeping", /* 66 KB */
.offset = 0,
.size = (8 + 248) * 264,
-// .mask_flags = MTD_WRITEABLE,
+ /* .mask_flags = MTD_WRITEABLE, */
}, {
/* sector 2 = 256 pages * 264 bytes/page
* sectors 3-5 = 512 pages * 264 bytes/page
*/
- .name = "filesystem", // 462 KB
+ .name = "filesystem", /* 462 KB */
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
} };
@@ -209,7 +209,7 @@ static void butterfly_attach(struct parport *p)
* and no way to be selective about what it binds to.
*/
- master = spi_alloc_master(dev, sizeof *pp);
+ master = spi_alloc_master(dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto done;
@@ -225,7 +225,7 @@ static void butterfly_attach(struct parport *p)
master->bus_num = 42;
master->num_chipselect = 2;
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
@@ -289,7 +289,6 @@ static void butterfly_attach(struct parport *p)
pr_debug("%s: dataflash at %s\n", p->name,
dev_name(&pp->dataflash->dev));
- // dev_info(_what?_, ...)
pr_info("%s: AVR Butterfly\n", p->name);
butterfly = pp;
return;
@@ -310,7 +309,6 @@ done:
static void butterfly_detach(struct parport *p)
{
struct butterfly *pp;
- int status;
/* FIXME this global is ugly ... but, how to quickly get from
* the parport to the "struct butterfly" associated with it?
@@ -322,7 +320,7 @@ static void butterfly_detach(struct parport *p)
butterfly = NULL;
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&pp->bitbang);
+ spi_bitbang_stop(&pp->bitbang);
/* turn off VCC */
parport_write_data(pp->port, 0);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 00000000000..bb758978465
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,673 @@
+/*
+ * Cadence SPI controller driver (master mode only)
+ *
+ * Copyright (C) 2008 - 2014 Xilinx, Inc.
+ *
+ * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* Name of this driver */
+#define CDNS_SPI_NAME "cdns-spi"
+
+/* Register offset definitions */
+#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
+#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
+#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
+#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
+#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
+#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
+#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
+#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
+#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
+#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
+#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */
+
+/*
+ * SPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the SPI controller
+ */
+#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
+#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
+#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
+#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
+#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
+#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
+#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
+#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
+#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
+ CDNS_SPI_CR_SSCTRL_MASK | \
+ CDNS_SPI_CR_SSFORCE_MASK | \
+ CDNS_SPI_CR_BAUD_DIV_4_MASK)
+
+/*
+ * SPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+
+#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
+#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
+#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
+#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
+
+/*
+ * SPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
+#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
+#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
+#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
+ CDNS_SPI_IXR_MODF_MASK)
+#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
+#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
+
+/*
+ * SPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the SPI controller
+ */
+#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
+#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
+
+/* SPI FIFO depth in bytes */
+#define CDNS_SPI_FIFO_DEPTH 128
+
+/* Default number of chip select lines */
+#define CDNS_SPI_DEFAULT_NUM_CS 4
+
+/**
+ * struct cdns_spi - This definition defines spi driver instance
+ * @regs: Virtual address of the SPI controller registers
+ * @ref_clk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @speed_hz: Current SPI bus clock speed in Hz
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes left to receive
+ * @dev_busy: Device busy flag
+ * @is_decoded_cs: Flag for decoder property set or not
+ */
+struct cdns_spi {
+ void __iomem *regs;
+ struct clk *ref_clk;
+ struct clk *pclk;
+ u32 speed_hz;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ u8 dev_busy;
+ u32 is_decoded_cs;
+};
+
+/* Macros for the SPI controller read/write */
+static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
+{
+ return readl_relaxed(xspi->regs + offset);
+}
+
+static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
+{
+ writel_relaxed(val, xspi->regs + offset);
+}
+
+/**
+ * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
+ * @xspi: Pointer to the cdns_spi structure
+ *
+ * On reset the SPI controller is configured to be in master mode, the baud
+ * rate divisor is set to 4, the threshold for the TX FIFO not-full interrupt
+ * is set to 1 and the transfer word size is 8 bits.
+ * This function initializes the SPI controller to disable and clear all the
+ * interrupts, enable manual slave select and manual start, deselect all the
+ * chip select lines, and enable the SPI controller.
+ */
+static void cdns_spi_init_hw(struct cdns_spi *xspi)
+{
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_ALL_MASK);
+
+ /* Clear the RX FIFO */
+ while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
+ CDNS_SPI_IXR_RXNEMTY_MASK)
+ cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+
+ cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
+ CDNS_SPI_IXR_ALL_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET,
+ CDNS_SPI_CR_DEFAULT_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_ENABLE_MASK);
+}
+
+/**
+ * cdns_spi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @is_high: Deselect the chip select line when true, select it when false
+ */
+static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ if (is_high) {
+ /* Deselect the slave */
+ ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
+ } else {
+ /* Select the slave */
+ ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
+ if (!(xspi->is_decoded_cs))
+ ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
+ CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL_MASK;
+ else
+ ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL_MASK;
+ }
+
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+}
+
+/**
+ * cdns_spi_config_clock_mode - Sets clock polarity and phase
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the requested clock polarity and phase.
+ */
+static void cdns_spi_config_clock_mode(struct spi_device *spi)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ /* Set the SPI clock phase and clock polarity */
+ ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
+ if (spi->mode & SPI_CPHA)
+ ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
+ if (spi->mode & SPI_CPOL)
+ ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
+
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+}
+
+/**
+ * cdns_spi_config_clock_freq - Sets clock frequency
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the requested clock frequency.
+ * Note: If the requested frequency cannot be produced exactly by the
+ * prescaler, the driver selects the closest achievable frequency below the
+ * requested one for the transfer. If the requested frequency lies outside
+ * the range the SPI controller supports, the driver uses the highest or
+ * lowest frequency the controller can provide.
+ */
+static void cdns_spi_config_clock_freq(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg, baud_rate_val;
+ unsigned long frequency;
+
+ frequency = clk_get_rate(xspi->ref_clk);
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ /* Set the clock frequency */
+ if (xspi->speed_hz != transfer->speed_hz) {
+ /* first valid value is 1 */
+ baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
+ while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
+ (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
+ baud_rate_val++;
+
+ ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
+ ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
+
+ xspi->speed_hz = frequency / (2 << baud_rate_val);
+ }
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+}
+
+/**
+ * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of SPI controller for the next SPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: Always 0
+ */
+static int cdns_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+
+ cdns_spi_config_clock_freq(spi, transfer);
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
+ __func__, spi->mode, spi->bits_per_word,
+ xspi->speed_hz);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * @xspi: Pointer to the cdns_spi structure
+ */
+static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+{
+ unsigned long trans_cnt = 0;
+
+ while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
+ (xspi->tx_bytes > 0)) {
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
+ *xspi->txbuf++);
+ else
+ cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
+
+ xspi->tx_bytes--;
+ trans_cnt++;
+ }
+}
+
+/**
+ * cdns_spi_irq - Interrupt service routine of the SPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the spi_master structure
+ *
+ * This function handles TX empty and Mode Fault interrupts only.
+ * On TX empty interrupt this function reads the received data from RX FIFO and
+ * fills the TX FIFO if there is any data remaining to be transferred.
+ * On a Mode Fault interrupt this function indicates that the transfer is
+ * completed; the SPI subsystem will flag the error because the number of
+ * bytes still to be transferred is non-zero.
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ u32 intr_status, status;
+
+ status = IRQ_NONE;
+ intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
+ cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
+
+ if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
+ /* Indicate that the transfer is completed; the SPI subsystem
+ * will flag the error because the number of bytes still to be
+ * transferred is non-zero
+ */
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ spi_finalize_current_transfer(master);
+ status = IRQ_HANDLED;
+ } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
+ unsigned long trans_cnt;
+
+ trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+ /* Read out the data from the RX FIFO */
+ while (trans_cnt) {
+ u8 data;
+
+ data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ xspi->rx_bytes--;
+ trans_cnt--;
+ }
+
+ if (xspi->tx_bytes) {
+ /* There is more data to send */
+ cdns_spi_fill_tx_fifo(xspi);
+ } else {
+ /* Transfer is completed */
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ spi_finalize_current_transfer(master);
+ }
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+/**
+ * cdns_transfer_one - Initiates the SPI transfer
+ * @master: Pointer to spi_master structure
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer parameters
+ *
+ * This function fills the TX FIFO, starts the SPI transfer and
+ * returns a positive transfer count so that the core will wait for completion.
+ *
+ * Return: Number of bytes transferred in the last transfer
+ */
+static int cdns_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ xspi->txbuf = transfer->tx_buf;
+ xspi->rxbuf = transfer->rx_buf;
+ xspi->tx_bytes = transfer->len;
+ xspi->rx_bytes = transfer->len;
+
+ cdns_spi_setup_transfer(spi, transfer);
+
+ cdns_spi_fill_tx_fifo(xspi);
+
+ cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ return transfer->len;
+}
+
+/**
+ * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: 0 always
+ */
+static int cdns_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_config_clock_mode(master->cur_msg->spi);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: 0 always
+ */
+static int cdns_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_probe - Probe method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int cdns_spi_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq;
+ struct spi_master *master;
+ struct cdns_spi *xspi;
+ struct resource *res;
+ u32 num_cs;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
+ if (master == NULL)
+ return -ENOMEM;
+
+ xspi = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto remove_master;
+ }
+
+ xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xspi->pclk);
+ goto remove_master;
+ }
+
+ xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xspi->ref_clk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xspi->ref_clk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_apb;
+ }
+
+ /* SPI controller initializations */
+ cdns_spi_init_hw(xspi);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "irq number is invalid\n");
+ goto clk_dis_all;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
+ 0, pdev->name, master);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+
+ if (ret < 0)
+ master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
+ &xspi->is_decoded_cs);
+
+ if (ret < 0)
+ xspi->is_decoded_cs = 0;
+
+ master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
+ master->transfer_one = cdns_transfer_one;
+ master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
+ master->set_cs = cdns_spi_chipselect;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ /* Set to default valid value */
+ master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ xspi->speed_hz = master->max_speed_hz;
+
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ clk_disable_unprepare(xspi->ref_clk);
+clk_dis_apb:
+ clk_disable_unprepare(xspi->pclk);
+remove_master:
+ spi_master_put(master);
+ return ret;
+}
+
+/**
+ * cdns_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int cdns_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+
+ clk_disable_unprepare(xspi->ref_clk);
+ clk_disable_unprepare(xspi->pclk);
+
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the SPI controller and
+ * changes the driver state to "suspend"
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused cdns_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ clk_disable_unprepare(xspi->ref_clk);
+
+ clk_disable_unprepare(xspi->pclk);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function changes the driver state to "ready"
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable_unprepare(xspi->pclk);
+ return ret;
+ }
+ spi_master_resume(master);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
+ cdns_spi_resume);
+
+static struct of_device_id cdns_spi_of_match[] = {
+ { .compatible = "xlnx,zynq-spi-r1p6" },
+ { .compatible = "cdns,spi-r1p6" },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
+
+/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
+static struct platform_driver cdns_spi_driver = {
+ .probe = cdns_spi_probe,
+ .remove = cdns_spi_remove,
+ .driver = {
+ .name = CDNS_SPI_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cdns_spi_of_match,
+ .pm = &cdns_spi_dev_pm_ops,
+ },
+};
+
+module_platform_driver(cdns_spi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Cadence SPI driver");
+MODULE_LICENSE("GPL");
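
cdns_spi_config_clock_freq() above walks the 3-bit divisor field until ref_clk / 2^(val + 1) drops to or below the requested speed, so the achievable bus clock ranges from ref_clk/4 down to ref_clk/256 and the driver always rounds down. A standalone sketch of that selection follows; it is an illustration only, and the 166 MHz reference clock and 10 MHz request are assumed example values.

#include <stdio.h>

/* Mirrors the divisor walk in cdns_spi_config_clock_freq(). */
static unsigned long cdns_pick_speed(unsigned long ref_clk,
                                     unsigned long requested_hz,
                                     unsigned int *val_out)
{
        unsigned int val = 1;                   /* CDNS_SPI_BAUD_DIV_MIN */

        /* step to the next slower divisor while still above the request */
        while (val < 7 && ref_clk / (2UL << val) > requested_hz)
                val++;

        *val_out = val;
        return ref_clk / (2UL << val);          /* actual bus speed */
}

int main(void)
{
        unsigned int val;
        unsigned long actual = cdns_pick_speed(166666666UL, 10000000UL, &val);

        printf("divisor field %u -> %lu Hz\n", val, actual);
        return 0;
}
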
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
new file mode 100644
index 00000000000..4cd62f63654
--- /dev/null
+++ b/drivers/spi/spi-clps711x.c
@@ -0,0 +1,247 @@
+/*
+ * CLPS711X SPI bus driver
+ *
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/spi-clps711x.h>
+
+#define DRIVER_NAME "spi-clps711x"
+
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
+
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct regmap *syscon1;
+ struct clk *spi_clk;
+
+ u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned int bpw;
+ int len;
+};
+
+static int spi_clps711x_setup(struct spi_device *spi)
+{
+ /* We expect that the SPI device is not selected */
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
+static void spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_master *master = spi->master;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+
+ /* Setup SPI frequency divider */
+ if (xfer->speed_hz >= master->max_speed_hz)
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 2))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 8))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1));
+ else
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0));
+}
+
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
+
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ spi_clps711x_setup_xfer(spi, xfer);
+
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
+
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
+
+ return 1;
+}
+
+static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ /* Handle RX */
+ data = readb(hw->syncio);
+ if (hw->rx_buf)
+ *hw->rx_buf++ = data;
+
+ /* Handle TX */
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
+ } else
+ spi_finalize_current_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_clps711x_probe(struct platform_device *pdev)
+{
+ struct spi_clps711x_data *hw;
+ struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct resource *res;
+ int i, irq, ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ if (pdata->num_chipselect < 1) {
+ dev_err(&pdev->dev, "At least one CS must be defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
+ return -ENOMEM;
+
+ master->cs_gpios = devm_kzalloc(&pdev->dev, sizeof(int) *
+ pdata->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = spi_clps711x_setup;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
+
+ hw = spi_master_get_devdata(master);
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ master->cs_gpios[i] = pdata->chipselect[i];
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
+ goto err_out;
+ }
+ }
+
+ hw->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->spi_clk)) {
+ dev_err(&pdev->dev, "Can't get clocks\n");
+ ret = PTR_ERR(hw->spi_clk);
+ goto err_out;
+ }
+ master->max_speed_hz = clk_get_rate(hw->spi_clk);
+
+ platform_set_drvdata(pdev, master);
+
+ hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1");
+ if (IS_ERR(hw->syscon1)) {
+ ret = PTR_ERR(hw->syscon1);
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
+ /* Disable extended mode due to hardware problems */
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
+
+ /* Clear possible pending interrupt */
+ readl(hw->syncio);
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_out;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret) {
+ dev_info(&pdev->dev,
+ "SPI bus driver initialized. Master clock %u Hz\n",
+ master->max_speed_hz);
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to register master\n");
+
+err_out:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver clps711x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = spi_clps711x_probe,
+};
+module_platform_driver(clps711x_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
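
The driver above pushes one frame per interrupt: spi_clps711x_transfer_one() launches the first frame and returns 1, which tells the SPI core to wait until spi_finalize_current_transfer() is called from the ISR once the byte count reaches zero. A condensed sketch of that contract follows; the example_* register accessors and the example_hw fields are assumptions for illustration, not the driver's code.

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

struct example_hw {                     /* assumed driver state */
        const u8 *tx_buf;
        u8 *rx_buf;
        int len;
};

/* assumed register accessors */
static void example_write_frame(struct example_hw *hw, u8 data) { }
static u8 example_read_frame(struct example_hw *hw) { return 0; }

static int example_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct example_hw *hw = spi_master_get_devdata(master);

        hw->len = xfer->len;
        hw->tx_buf = xfer->tx_buf;
        hw->rx_buf = xfer->rx_buf;

        /* kick off the first frame */
        example_write_frame(hw, hw->tx_buf ? *hw->tx_buf++ : 0);

        return 1;       /* >0: core waits for spi_finalize_current_transfer() */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct spi_master *master = dev_id;
        struct example_hw *hw = spi_master_get_devdata(master);
        u8 data = example_read_frame(hw);

        if (hw->rx_buf)
                *hw->rx_buf++ = data;

        if (--hw->len > 0)
                example_write_frame(hw, hw->tx_buf ? *hw->tx_buf++ : 0);
        else
                spi_finalize_current_transfer(master);  /* wake the core */

        return IRQ_HANDLED;
}
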
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 764bfee7592..e2fa628e55e 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -77,8 +77,6 @@ struct mcfqspi {
struct mcfqspi_cs_control *cs_control;
wait_queue_head_t waitq;
-
- struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -135,13 +133,13 @@ static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
{
- return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+ return (mcfqspi->cs_control->setup) ?
mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
}
static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
{
- if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+ if (mcfqspi->cs_control->teardown)
mcfqspi->cs_control->teardown(mcfqspi->cs_control);
}
@@ -300,95 +298,45 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static int mcfqspi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *t;
- int status = 0;
-
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- if (t->bits_per_word)
- qmr |= t->bits_per_word << 10;
- else
- qmr |= spi->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (t->speed_hz)
- qmr |= mcfqspi_qmr_baud(t->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ if (enable)
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
-
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if ((t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word) == 8)
- mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
- t->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
- t->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
- if (t->cs_change) {
- if (!list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- }
- msg->actual_length += t->len;
- }
- msg->status = status;
- spi_finalize_current_message(master);
-
- return status;
-
-}
-
-static int mcfqspi_prepare_transfer_hw(struct spi_master *master)
-{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(mcfqspi->dev);
-
- return 0;
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
}
-static int mcfqspi_unprepare_transfer_hw(struct spi_master *master)
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
-
- pm_runtime_put_sync(mcfqspi->dev);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
return 0;
}
static int mcfqspi_setup(struct spi_device *spi)
{
- if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) {
- dev_dbg(&spi->dev, "%d bits per word is not supported\n",
- spi->bits_per_word);
- return -EINVAL;
- }
- if (spi->chip_select >= spi->master->num_chipselect) {
- dev_dbg(&spi->dev, "%d chip select is out of range\n",
- spi->chip_select);
- return -EINVAL;
- }
-
mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
spi->chip_select, spi->mode & SPI_CS_HIGH);
@@ -401,7 +349,7 @@ static int mcfqspi_setup(struct spi_device *spi)
return 0;
}
-static int __devinit mcfqspi_probe(struct platform_device *pdev)
+static int mcfqspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct mcfqspi *mcfqspi;
@@ -409,6 +357,17 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
struct mcfqspi_platform_data *pdata;
int status;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "platform data is missing\n");
+ return -ENOENT;
+ }
+
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
if (master == NULL) {
dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
@@ -418,52 +377,34 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
mcfqspi = spi_master_get_devdata(master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(&pdev->dev, "platform_get_resource failed\n");
- status = -ENXIO;
+ mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcfqspi->iobase)) {
+ status = PTR_ERR(mcfqspi->iobase);
goto fail0;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- status = -EBUSY;
- goto fail0;
- }
-
- mcfqspi->iobase = ioremap(res->start, resource_size(res));
- if (!mcfqspi->iobase) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- status = -ENOMEM;
- goto fail1;
- }
-
mcfqspi->irq = platform_get_irq(pdev, 0);
if (mcfqspi->irq < 0) {
dev_dbg(&pdev->dev, "platform_get_irq failed\n");
status = -ENXIO;
- goto fail2;
+ goto fail0;
}
- status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, 0,
- pdev->name, mcfqspi);
+ status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+ 0, pdev->name, mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "request_irq failed\n");
- goto fail2;
+ goto fail0;
}
- mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+ mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
if (IS_ERR(mcfqspi->clk)) {
dev_dbg(&pdev->dev, "clk_get failed\n");
status = PTR_ERR(mcfqspi->clk);
- goto fail3;
+ goto fail0;
}
clk_enable(mcfqspi->clk);
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_dbg(&pdev->dev, "platform data is missing\n");
- goto fail4;
- }
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -471,42 +412,35 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail4;
+ goto fail1;
}
init_waitqueue_head(&mcfqspi->waitq);
- mcfqspi->dev = &pdev->dev;
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->setup = mcfqspi_setup;
- master->transfer_one_message = mcfqspi_transfer_one_message;
- master->prepare_transfer_hardware = mcfqspi_prepare_transfer_hw;
- master->unprepare_transfer_hardware = mcfqspi_unprepare_transfer_hw;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
+ master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail5;
+ goto fail2;
}
- pm_runtime_enable(mcfqspi->dev);
+ pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
-fail5:
- mcfqspi_cs_teardown(mcfqspi);
-fail4:
- clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
-fail3:
- free_irq(mcfqspi->irq, mcfqspi);
fail2:
- iounmap(mcfqspi->iobase);
+ mcfqspi_cs_teardown(mcfqspi);
fail1:
- release_mem_region(res->start, resource_size(res));
+ clk_disable(mcfqspi->clk);
fail0:
spi_master_put(master);
@@ -515,24 +449,17 @@ fail0:
return status;
}
-static int __devexit mcfqspi_remove(struct platform_device *pdev)
+static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_disable(mcfqspi->dev);
+ pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
- platform_set_drvdata(pdev, NULL);
mcfqspi_cs_teardown(mcfqspi);
clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
- free_irq(mcfqspi->irq, mcfqspi);
- iounmap(mcfqspi->iobase);
- release_mem_region(res->start, resource_size(res));
- spi_unregister_master(master);
return 0;
}
@@ -542,8 +469,11 @@ static int mcfqspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
- spi_master_suspend(master);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(mcfqspi->clk);
@@ -555,18 +485,17 @@ static int mcfqspi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spi_master_resume(master);
-
clk_enable(mcfqspi->clk);
- return 0;
+ return spi_master_resume(master);
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_disable(mcfqspi->clk);
@@ -575,7 +504,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_enable(mcfqspi->clk);
@@ -594,7 +524,7 @@ static struct platform_driver mcfqspi_driver = {
.driver.owner = THIS_MODULE,
.driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
- .remove = __devexit_p(mcfqspi_remove),
+ .remove = mcfqspi_remove,
};
module_platform_driver(mcfqspi_driver);
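
mcfqspi_transfer_one() above can build QMR directly from t->bits_per_word and t->speed_hz because the core now fills in per-transfer defaults. A standalone sketch of that register composition follows; the bit positions and the simplified busclk / (2 * speed) baud formula are assumptions for illustration, not values taken from the patch.

#include <stdio.h>

/* assumed bit layout for illustration: MSTR(15), BITS(13:10), CPOL(9),
 * CPHA(8), BAUD(7:0) */
#define EX_QMR_MSTR     0x8000u
#define EX_QMR_CPOL     0x0200u
#define EX_QMR_CPHA     0x0100u

static unsigned int example_qmr(unsigned int bits_per_word, int cpol,
                                int cpha, unsigned long busclk,
                                unsigned long speed_hz)
{
        unsigned int qmr = EX_QMR_MSTR;

        qmr |= bits_per_word << 10;             /* BITS field */
        if (cpha)
                qmr |= EX_QMR_CPHA;
        if (cpol)
                qmr |= EX_QMR_CPOL;
        qmr |= busclk / (2 * speed_hz);         /* simplified BAUD field */

        return qmr;
}

int main(void)
{
        /* 8 bits/word, mode 0, 66 MHz bus clock, 1 MHz transfer */
        printf("QMR = 0x%04x\n", example_qmr(8, 0, 0, 66000000UL, 1000000UL));
        return 0;
}
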
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 147dfa87a64..50f75098925 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -28,6 +28,8 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@@ -135,7 +137,7 @@ struct davinci_spi {
int dma_rx_chnum;
int dma_tx_chnum;
- struct davinci_spi_platform_data *pdata;
+ struct davinci_spi_platform_data pdata;
void (*get_rx)(u32 rx_data, struct davinci_spi *);
u32 (*get_tx)(struct davinci_spi *);
@@ -213,7 +215,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
bool gpio_chipsel = false;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
+ pdata = &dspi->pdata;
if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
@@ -277,7 +279,8 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
struct davinci_spi *dspi;
struct davinci_spi_config *spicfg;
u8 bits_per_word = 0;
- u32 hz = 0, spifmt = 0, prescale = 0;
+ u32 hz = 0, spifmt = 0;
+ int prescale;
dspi = spi_master_get_devdata(spi->master);
spicfg = (struct davinci_spi_config *)spi->controller_data;
@@ -297,16 +300,15 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
* Assign function pointer to appropriate transfer method
* 8bit, 16bit or 32bit transfer
*/
- if (bits_per_word <= 8 && bits_per_word >= 2) {
+ if (bits_per_word <= 8) {
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
dspi->bytes_per_word[spi->chip_select] = 1;
- } else if (bits_per_word <= 16 && bits_per_word >= 2) {
+ } else {
dspi->get_rx = davinci_spi_rx_buf_u16;
dspi->get_tx = davinci_spi_tx_buf_u16;
dspi->bytes_per_word[spi->chip_select] = 2;
- } else
- return -EINVAL;
+ }
if (!hz)
hz = spi->max_speed_hz;
@@ -392,11 +394,7 @@ static int davinci_spi_setup(struct spi_device *spi)
struct davinci_spi_platform_data *pdata;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
-
- /* if bits per word length is zero then set it default 8 */
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
+ pdata = &dspi->pdata;
if (!(spi->mode & SPI_NO_CS)) {
if ((pdata->chip_sel == NULL) ||
@@ -534,7 +532,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct scatterlist sg_rx, sg_tx;
dspi = spi_master_get_devdata(spi->master);
- pdata = dspi->pdata;
+ pdata = &dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
@@ -552,7 +550,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
- INIT_COMPLETION(dspi->done);
+ reinit_completion(&dspi->done);
if (spicfg->io_type == SPI_IO_TYPE_INTR)
set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
@@ -608,7 +606,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
else
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
- t->len, DMA_FROM_DEVICE);
+ t->len, DMA_TO_DEVICE);
if (!t->tx_dma) {
ret = -EFAULT;
goto err_tx_map;
@@ -700,6 +698,19 @@ err_alloc_dummy_buf:
}
/**
+ * dummy_thread_fn - dummy thread function
+ * @irq: IRQ number for this SPI Master
+ * @context_data: structure for SPI Master controller davinci_spi
+ *
+ * This is to satisfy the request_threaded_irq() API so that the irq
+ * handler is called in interrupt context.
+ */
+static irqreturn_t dummy_thread_fn(s32 irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+/**
* davinci_spi_irq - Interrupt handler for SPI Master Controller
* @irq: IRQ number for this SPI Master
* @context_data: structure for SPI Master controller davinci_spi
@@ -758,6 +769,68 @@ rx_dma_failed:
return r;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_spi_of_match[] = {
+ {
+ .compatible = "ti,dm6441-spi",
+ },
+ {
+ .compatible = "ti,da830-spi",
+ .data = (void *)SPI_VERSION_2,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
+
+/**
+ * spi_davinci_get_pdata - Get platform data from DTS binding
+ * @pdev: ptr to platform data
+ * @dspi: ptr to driver data
+ *
+ * Parses and populates pdata in dspi from device tree bindings.
+ *
+ * NOTE: Not all platform data params are supported currently.
+ */
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct davinci_spi_platform_data *pdata;
+ unsigned int num_cs, intr_line = 0;
+ const struct of_device_id *match;
+
+ pdata = &dspi->pdata;
+
+ pdata->version = SPI_VERSION_1;
+ match = of_match_device(davinci_spi_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ /* match data has the SPI version number for SPI_VERSION_2 */
+ if (match->data == (void *)SPI_VERSION_2)
+ pdata->version = SPI_VERSION_2;
+
+ /*
+ * default num_cs is 1 and all chipsel are internal to the chip
+ * indicated by chip_sel being NULL. GPIO based CS is not
+ * supported yet in DT bindings.
+ */
+ num_cs = 1;
+ of_property_read_u32(node, "num-cs", &num_cs);
+ pdata->num_chipselect = num_cs;
+ of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
+ pdata->intr_line = intr_line;
+ return 0;
+}
+#else
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ return -ENODEV;
+}
+#endif
+
/**
* davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
@@ -769,37 +842,40 @@ rx_dma_failed:
* It will invoke spi_bitbang_start to create work queue so that client driver
* can register transfer method to work queue.
*/
-static int __devinit davinci_spi_probe(struct platform_device *pdev)
+static int davinci_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
- struct resource *r, *mem;
+ struct resource *r;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
int i = 0, ret = 0;
u32 spipc0;
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- ret = -ENODEV;
- goto err;
- }
-
master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
if (master == NULL) {
ret = -ENOMEM;
goto err;
}
- dev_set_drvdata(&pdev->dev, master);
+ platform_set_drvdata(pdev, master);
dspi = spi_master_get_devdata(master);
- if (dspi == NULL) {
- ret = -ENOENT;
- goto free_master;
+
+ if (dev_get_platdata(&pdev->dev)) {
+ pdata = dev_get_platdata(&pdev->dev);
+ dspi->pdata = *pdata;
+ } else {
+ /* update dspi pdata with that from the DT */
+ ret = spi_davinci_get_pdata(pdev, dspi);
+ if (ret < 0)
+ goto free_master;
}
+ /* pdata in dspi is now updated and point pdata to that */
+ pdata = &dspi->pdata;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENOENT;
@@ -807,46 +883,37 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
}
dspi->pbase = r->start;
- dspi->pdata = pdata;
- mem = request_mem_region(r->start, resource_size(r), pdev->name);
- if (mem == NULL) {
- ret = -EBUSY;
+ dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
goto free_master;
}
- dspi->base = ioremap(r->start, resource_size(r));
- if (dspi->base == NULL) {
- ret = -ENOMEM;
- goto release_region;
- }
-
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
ret = -EINVAL;
- goto unmap_io;
+ goto free_master;
}
- ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
- dspi);
+ ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+ dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
if (ret)
- goto unmap_io;
+ goto free_master;
- dspi->bitbang.master = spi_master_get(master);
- if (dspi->bitbang.master == NULL) {
- ret = -ENODEV;
- goto irq_free;
- }
+ dspi->bitbang.master = master;
- dspi->clk = clk_get(&pdev->dev, NULL);
+ dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto put_master;
+ goto free_master;
}
- clk_enable(dspi->clk);
+ clk_prepare_enable(dspi->clk);
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
master->setup = davinci_spi_setup;
dspi->bitbang.chipselect = davinci_spi_chipselect;
@@ -876,8 +943,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
goto free_clk;
dev_info(&pdev->dev, "DMA: supported\n");
- dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
- "event queue: %d\n", dma_rx_chan, dma_tx_chan,
+ dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, "
+ "event queue: %d\n", &dma_rx_chan, &dma_tx_chan,
pdata->dma_event_q);
}
@@ -927,18 +994,9 @@ free_dma:
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
free_clk:
- clk_disable(dspi->clk);
- clk_put(dspi->clk);
-put_master:
- spi_master_put(master);
-irq_free:
- free_irq(dspi->irq, dspi);
-unmap_io:
- iounmap(dspi->base);
-release_region:
- release_mem_region(dspi->pbase, resource_size(r));
+ clk_disable_unprepare(dspi->clk);
free_master:
- kfree(master);
+ spi_master_put(master);
err:
return ret;
}
@@ -952,24 +1010,18 @@ err:
* It will also call spi_bitbang_stop to destroy the work queue which was
* created by spi_bitbang_start.
*/
-static int __devexit davinci_spi_remove(struct platform_device *pdev)
+static int davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
- struct resource *r;
- master = dev_get_drvdata(&pdev->dev);
+ master = platform_get_drvdata(pdev);
dspi = spi_master_get_devdata(master);
spi_bitbang_stop(&dspi->bitbang);
- clk_disable(dspi->clk);
- clk_put(dspi->clk);
+ clk_disable_unprepare(dspi->clk);
spi_master_put(master);
- free_irq(dspi->irq, dspi);
- iounmap(dspi->base);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(dspi->pbase, resource_size(r));
return 0;
}
@@ -978,9 +1030,10 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
- .remove = __devexit_p(davinci_spi_remove),
+ .remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);
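
spi_davinci_get_pdata() above relies on the "optional property with a default" pattern: of_property_read_u32() leaves its output untouched on error, so pre-loading the default and ignoring the return value is enough for num-cs and ti,davinci-spi-intr-line. A small sketch of that pattern, with the example_read_optional() name assumed for illustration:

#include <linux/of.h>
#include <linux/types.h>

static void example_read_optional(struct device_node *node,
                                  u32 *num_cs, u32 *intr_line)
{
        *num_cs = 1;            /* default: one internal chip select */
        *intr_line = 0;         /* default: interrupt line 0 */

        /* A missing optional property simply keeps the default, because
         * of_property_read_u32() does not touch the output on failure. */
        of_property_read_u32(node, "num-cs", num_cs);
        of_property_read_u32(node, "ti,davinci-spi-intr-line", intr_line);
}
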
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d..6d207afec8c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
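
DMA_COMPL_SKIP_DEST_UNMAP is dropped in both hunks above; my reading (not stated in the patch) is that the flag became a no-op once descriptor unmap handling moved into the dmaengine core. For orientation, here is the equivalent TX descriptor setup expressed through the generic dmaengine_prep_slave_sg() wrapper, as a sketch with assumed channel, scatterlist and callback arguments:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_prep_tx(struct dma_chan *txchan, struct scatterlist *sgl,
                           dma_async_tx_callback done, void *param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* only DMA_PREP_INTERRUPT is needed; no unmap-control flags */
        desc = dmaengine_prep_slave_sg(txchan, sgl, 1, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        desc->callback = done;          /* e.g. dw_spi_dma_done */
        desc->callback_param = param;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        return 0;
}
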
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index db2f1ba06ea..a5cba14ac3d 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -16,6 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include "spi-dw.h"
@@ -26,18 +27,17 @@ struct dw_spi_mmio {
struct clk *clk;
};
-static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
+static int dw_spi_mmio_probe(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
- struct resource *mem, *ioarea;
+ struct resource *mem;
int ret;
- dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
- if (!dwsmmio) {
- ret = -ENOMEM;
- goto err_end;
- }
+ dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
+ GFP_KERNEL);
+ if (!dwsmmio)
+ return -ENOMEM;
dws = &dwsmmio->dws;
@@ -45,91 +45,78 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
- ret = -EINVAL;
- goto err_kfree;
- }
-
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "SPI region already claimed\n");
- ret = -EBUSY;
- goto err_kfree;
+ return -EINVAL;
}
- dws->regs = ioremap_nocache(mem->start, resource_size(mem));
- if (!dws->regs) {
- dev_err(&pdev->dev, "SPI region already mapped\n");
- ret = -ENOMEM;
- goto err_release_reg;
+ dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dws->regs)) {
+ dev_err(&pdev->dev, "SPI region map failed\n");
+ return PTR_ERR(dws->regs);
}
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- ret = dws->irq; /* -ENXIO */
- goto err_unmap;
+ return dws->irq; /* -ENXIO */
}
- dwsmmio->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(dwsmmio->clk)) {
- ret = PTR_ERR(dwsmmio->clk);
- goto err_irq;
- }
- clk_enable(dwsmmio->clk);
+ dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsmmio->clk))
+ return PTR_ERR(dwsmmio->clk);
+ ret = clk_prepare_enable(dwsmmio->clk);
+ if (ret)
+ return ret;
- dws->parent_dev = &pdev->dev;
- dws->bus_num = 0;
+ dws->bus_num = pdev->id;
dws->num_cs = 4;
dws->max_freq = clk_get_rate(dwsmmio->clk);
- ret = dw_spi_add_host(dws);
+ if (pdev->dev.of_node) {
+ int i;
+
+ for (i = 0; i < dws->num_cs; i++) {
+ int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER) {
+ ret = cs_gpio;
+ goto out;
+ }
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, cs_gpio,
+ dev_name(&pdev->dev));
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_clk;
+ goto out;
platform_set_drvdata(pdev, dwsmmio);
return 0;
-err_clk:
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
-err_irq:
- free_irq(dws->irq, dws);
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- release_mem_region(mem->start, resource_size(mem));
-err_kfree:
- kfree(dwsmmio);
-err_end:
+out:
+ clk_disable_unprepare(dwsmmio->clk);
return ret;
}
-static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
+static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
- struct resource *mem;
- platform_set_drvdata(pdev, NULL);
-
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
-
- free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->clk);
dw_spi_remove_host(&dwsmmio->dws);
- iounmap(dwsmmio->dws.regs);
- kfree(dwsmmio);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
return 0;
}
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
- .remove = __devexit_p(dw_spi_mmio_remove),
+ .remove = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
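
The dw-mmio probe conversion above replaces manual request/ioremap/clk_get calls with their devm_* counterparts, so the error paths collapse to plain returns and only the enabled clock still needs explicit cleanup. A minimal probe sketch of that pattern for a hypothetical device (example_probe and the omitted registration step are assumptions, not the driver's code):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *mem;
        void __iomem *regs;
        struct clk *clk;
        int irq, ret;

        /* devm_ioremap_resource() validates the resource itself */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* ... register the controller here; on failure the only manual
         * cleanup needed is clk_disable_unprepare(clk) ... */
        return 0;
}
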
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ff81abbb306..3f3dc1226ed 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -32,7 +32,7 @@ struct dw_spi_pci {
struct dw_spi dws;
};
-static int __devinit spi_pci_probe(struct pci_dev *pdev,
+static int spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dw_spi_pci *dwpci;
@@ -40,38 +40,28 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev,
int pci_bar = 0;
int ret;
- printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
+ dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
- if (!dwpci) {
- ret = -ENOMEM;
- goto err_disable;
- }
+ dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
+ GFP_KERNEL);
+ if (!dwpci)
+ return -ENOMEM;
dwpci->pdev = pdev;
dws = &dwpci->dws;
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
- dws->iolen = pci_resource_len(pdev, pci_bar);
- ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ ret = pcim_iomap_regions(pdev, 1, dev_name(&pdev->dev));
if (ret)
- goto err_kfree;
-
- dws->regs = ioremap_nocache((unsigned long)dws->paddr,
- pci_resource_len(pdev, pci_bar));
- if (!dws->regs) {
- ret = -ENOMEM;
- goto err_release_reg;
- }
+ return ret;
- dws->parent_dev = &pdev->dev;
dws->bus_num = 0;
dws->num_cs = 4;
dws->irq = pdev->irq;
@@ -83,38 +73,24 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev,
if (pdev->device == 0x0800) {
ret = dw_spi_mid_init(dws);
if (ret)
- goto err_unmap;
+ return ret;
}
- ret = dw_spi_add_host(dws);
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_unmap;
+ return ret;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
- return 0;
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- pci_release_region(pdev, pci_bar);
-err_kfree:
- kfree(dwpci);
-err_disable:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
-static void __devexit spi_pci_remove(struct pci_dev *pdev)
+static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, NULL);
dw_spi_remove_host(&dwpci->dws);
- iounmap(dwpci->dws.regs);
- pci_release_region(pdev, 0);
- kfree(dwpci);
- pci_disable_device(pdev);
}
#ifdef CONFIG_PM
@@ -149,7 +125,7 @@ static int spi_resume(struct pci_dev *pdev)
#define spi_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+static const struct pci_device_id pci_ids[] = {
/* Intel MID platform SPI controller 0 */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
{},
@@ -159,7 +135,7 @@ static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = spi_pci_probe,
- .remove = __devexit_p(spi_pci_remove),
+ .remove = spi_pci_remove,
.suspend = spi_suspend,
.resume = spi_resume,
};
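
The dw-pci conversion above switches to managed PCI helpers: pcim_enable_device() and pcim_iomap_regions() tie the enable, the BAR request and the mapping to the device's lifetime, which is why remove() no longer unmaps or disables anything by hand. A sketch of that pattern for a hypothetical device (the "example-spi" name and the omitted controller hookup are assumptions):

#include <linux/pci.h>

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        void __iomem *regs;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        /* request and map BAR 0; released automatically on detach */
        ret = pcim_iomap_regions(pdev, 1 << 0, "example-spi");
        if (ret)
                return ret;

        regs = pcim_iomap_table(pdev)[0];       /* mapped BAR 0 */

        /* ... hand regs and pdev->irq to the controller core ... */
        return 0;
}
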
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d1a495f64e2..29f33143b79 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include "spi-dw.h"
@@ -36,12 +37,6 @@
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
-#define QUEUE_RUNNING 0
-#define QUEUE_STOPPED 1
-
-#define MRST_SPI_DEASSERT 0
-#define MRST_SPI_ASSERT 1
-
/* Slave spi_dev related */
struct chip_data {
u16 cr0;
@@ -263,29 +258,22 @@ static int map_dma_buffers(struct dw_spi *dws)
static void giveback(struct dw_spi *dws)
{
struct spi_transfer *last_transfer;
- unsigned long flags;
struct spi_message *msg;
- spin_lock_irqsave(&dws->lock, flags);
msg = dws->cur_msg;
dws->cur_msg = NULL;
dws->cur_transfer = NULL;
dws->prev_chip = dws->cur_chip;
dws->cur_chip = NULL;
dws->dma_mapped = 0;
- queue_work(dws->workqueue, &dws->pump_messages);
- spin_unlock_irqrestore(&dws->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
- if (!last_transfer->cs_change && dws->cs_control)
- dws->cs_control(MRST_SPI_DEASSERT);
+ if (!last_transfer->cs_change)
+ spi_chip_sel(dws, dws->cur_msg->spi, 0);
- msg->state = NULL;
- if (msg->complete)
- msg->complete(msg->context);
+ spi_finalize_current_message(dws->master);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -427,7 +415,6 @@ static void pump_transfers(unsigned long data)
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
- dws->cs_change = transfer->cs_change;
dws->len = dws->cur_transfer->len;
if (chip != dws->prev_chip)
cs_change = 1;
@@ -440,12 +427,6 @@ static void pump_transfers(unsigned long data)
if (transfer->speed_hz != speed) {
speed = transfer->speed_hz;
- if (speed > dws->max_freq) {
- printk(KERN_ERR "MRST SPI0: unsupported"
- "freq: %dHz\n", speed);
- message->status = -EIO;
- goto early_exit;
- }
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
@@ -457,19 +438,7 @@ static void pump_transfers(unsigned long data)
}
if (transfer->bits_per_word) {
bits = transfer->bits_per_word;
-
- switch (bits) {
- case 8:
- case 16:
- dws->n_bytes = dws->dma_width = bits >> 3;
- break;
- default:
- printk(KERN_ERR "MRST SPI0: unsupported bits:"
- "%db\n", bits);
- message->status = -EIO;
- goto early_exit;
- }
-
+ dws->n_bytes = dws->dma_width = bits >> 3;
cr0 = (bits - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
@@ -522,7 +491,7 @@ static void pump_transfers(unsigned long data)
dw_writew(dws, DW_SPI_CTRL0, cr0);
spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
- spi_chip_sel(dws, spi->chip_select);
+ spi_chip_sel(dws, spi, 1);
/* Set the interrupt mask, for poll mode just disable all int */
spi_mask_intr(dws, 0xff);
@@ -549,30 +518,12 @@ early_exit:
return;
}
-static void pump_messages(struct work_struct *work)
+static int dw_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct dw_spi *dws =
- container_of(work, struct dw_spi, pump_messages);
- unsigned long flags;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&dws->lock, flags);
- if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
- dws->busy = 0;
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (dws->cur_msg) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
-
- /* Extract head of queue */
- dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
- list_del_init(&dws->cur_msg->queue);
+ struct dw_spi *dws = spi_master_get_devdata(master);
+ dws->cur_msg = msg;
/* Initial message state*/
dws->cur_msg->state = START_STATE;
dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
@@ -580,46 +531,9 @@ static void pump_messages(struct work_struct *work)
transfer_list);
dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
- /* Mark as busy and launch transfers */
+ /* Launch transfers */
tasklet_schedule(&dws->pump_transfers);
- dws->busy = 1;
- spin_unlock_irqrestore(&dws->lock, flags);
-}
-
-/* spi_device use this to queue in their spi_msg */
-static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
-{
- struct dw_spi *dws = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- spin_lock_irqsave(&dws->lock, flags);
-
- if (dws->run == QUEUE_STOPPED) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return -ESHUTDOWN;
- }
-
- msg->actual_length = 0;
- msg->status = -EINPROGRESS;
- msg->state = START_STATE;
-
- list_add_tail(&msg->queue, &dws->queue);
-
- if (dws->run == QUEUE_RUNNING && !dws->busy) {
-
- if (dws->cur_transfer || dws->cur_msg)
- queue_work(dws->workqueue,
- &dws->pump_messages);
- else {
- /* If no other data transaction in air, just go */
- spin_unlock_irqrestore(&dws->lock, flags);
- pump_messages(&dws->pump_messages);
- return 0;
- }
- }
-
- spin_unlock_irqrestore(&dws->lock, flags);
return 0;
}
@@ -628,16 +542,16 @@ static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
-
- if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
- return -EINVAL;
+ int ret;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
- chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ spi_set_ctldata(spi, chip);
}
/*
@@ -660,16 +574,12 @@ static int dw_spi_setup(struct spi_device *spi)
chip->enable_dma = chip_info->enable_dma;
}
- if (spi->bits_per_word <= 8) {
+ if (spi->bits_per_word == 8) {
chip->n_bytes = 1;
chip->dma_width = 1;
- } else if (spi->bits_per_word <= 16) {
+ } else if (spi->bits_per_word == 16) {
chip->n_bytes = 2;
chip->dma_width = 2;
- } else {
- /* Never take >16b case for MRST SPIC */
- dev_err(&spi->dev, "invalid wordsize\n");
- return -EINVAL;
}
chip->bits_per_word = spi->bits_per_word;
@@ -686,88 +596,13 @@ static int dw_spi_setup(struct spi_device *spi)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
- spi_set_ctldata(spi, chip);
- return 0;
-}
-
-static void dw_spi_cleanup(struct spi_device *spi)
-{
- struct chip_data *chip = spi_get_ctldata(spi);
- kfree(chip);
-}
-
-static int __devinit init_queue(struct dw_spi *dws)
-{
- INIT_LIST_HEAD(&dws->queue);
- spin_lock_init(&dws->lock);
-
- dws->run = QUEUE_STOPPED;
- dws->busy = 0;
-
- tasklet_init(&dws->pump_transfers,
- pump_transfers, (unsigned long)dws);
-
- INIT_WORK(&dws->pump_messages, pump_messages);
- dws->workqueue = create_singlethread_workqueue(
- dev_name(dws->master->dev.parent));
- if (dws->workqueue == NULL)
- return -EBUSY;
-
- return 0;
-}
-
-static int start_queue(struct dw_spi *dws)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dws->lock, flags);
-
- if (dws->run == QUEUE_RUNNING || dws->busy) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return -EBUSY;
- }
-
- dws->run = QUEUE_RUNNING;
- dws->cur_msg = NULL;
- dws->cur_transfer = NULL;
- dws->cur_chip = NULL;
- dws->prev_chip = NULL;
- spin_unlock_irqrestore(&dws->lock, flags);
-
- queue_work(dws->workqueue, &dws->pump_messages);
-
- return 0;
-}
-
-static int stop_queue(struct dw_spi *dws)
-{
- unsigned long flags;
- unsigned limit = 50;
- int status = 0;
-
- spin_lock_irqsave(&dws->lock, flags);
- dws->run = QUEUE_STOPPED;
- while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
- spin_unlock_irqrestore(&dws->lock, flags);
- msleep(10);
- spin_lock_irqsave(&dws->lock, flags);
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_direction_output(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
+ if (ret)
+ return ret;
}
- if (!list_empty(&dws->queue) || dws->busy)
- status = -EBUSY;
- spin_unlock_irqrestore(&dws->lock, flags);
-
- return status;
-}
-
-static int destroy_queue(struct dw_spi *dws)
-{
- int status;
-
- status = stop_queue(dws);
- if (status != 0)
- return status;
- destroy_workqueue(dws->workqueue);
return 0;
}
@@ -795,18 +630,16 @@ static void spi_hw_init(struct dw_spi *dws)
}
}
-int __devinit dw_spi_add_host(struct dw_spi *dws)
+int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_master *master;
int ret;
BUG_ON(dws == NULL);
- master = spi_alloc_master(dws->parent_dev, 0);
- if (!master) {
- ret = -ENOMEM;
- goto exit;
- }
+ master = spi_alloc_master(dev, 0);
+ if (!master)
+ return -ENOMEM;
dws->master = master;
dws->type = SSI_MOTO_SPI;
@@ -816,7 +649,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
dws->bus_num);
- ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
+ ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
dws->name, dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
@@ -824,11 +657,12 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
}
master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
- master->cleanup = dw_spi_cleanup;
master->setup = dw_spi_setup;
- master->transfer = dw_spi_transfer;
+ master->transfer_one_message = dw_spi_transfer_one_message;
+ master->max_speed_hz = dws->max_freq;
/* Basic HW init */
spi_hw_init(dws);
@@ -841,65 +675,39 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
}
}
- /* Initial and start queue */
- ret = init_queue(dws);
- if (ret) {
- dev_err(&master->dev, "problem initializing queue\n");
- goto err_diable_hw;
- }
- ret = start_queue(dws);
- if (ret) {
- dev_err(&master->dev, "problem starting queue\n");
- goto err_diable_hw;
- }
+ tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
spi_master_set_devdata(master, dws);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
- goto err_queue_alloc;
+ goto err_dma_exit;
}
mrst_spi_debugfs_init(dws);
return 0;
-err_queue_alloc:
- destroy_queue(dws);
+err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
-err_diable_hw:
spi_enable_chip(dws, 0);
- free_irq(dws->irq, dws);
err_free_master:
spi_master_put(master);
-exit:
return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
-void __devexit dw_spi_remove_host(struct dw_spi *dws)
+void dw_spi_remove_host(struct dw_spi *dws)
{
- int status = 0;
-
if (!dws)
return;
mrst_spi_debugfs_remove(dws);
- /* Remove the queue */
- status = destroy_queue(dws);
- if (status != 0)
- dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
- "complete, message memory not freed\n");
-
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_enable_chip(dws, 0);
/* Disable clk */
spi_set_clk(dws, 0);
- free_irq(dws->irq, dws);
-
- /* Disconnect from the SPI framework */
- spi_unregister_master(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
@@ -907,7 +715,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
{
int ret = 0;
- ret = stop_queue(dws);
+ ret = spi_master_suspend(dws->master);
if (ret)
return ret;
spi_enable_chip(dws, 0);
@@ -921,7 +729,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
int ret;
spi_hw_init(dws);
- ret = start_queue(dws);
+ ret = spi_master_resume(dws->master);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
return ret;
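The suspend/resume hooks now delegate queue handling to the SPI core: spi_master_suspend() stops the message queue (waiting for it to go idle) and spi_master_resume() restarts it, replacing the driver's private stop_queue()/start_queue(). In a host driver the wrappers typically reduce to something like this sketch (names are illustrative; hardware quiescing is omitted):

#include <linux/device.h>
#include <linux/spi/spi.h>

static int my_spi_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);

        /* stop pumping messages; returns an error if the queue is stuck */
        return spi_master_suspend(master);
}

static int my_spi_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);

        /* restart message pumping after the hardware is reinitialized */
        return spi_master_resume(master);
}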
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9c57c078031..6d2acad34f6 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -3,6 +3,7 @@
#include <linux/io.h>
#include <linux/scatterlist.h>
+#include <linux/gpio.h>
/* Register offsets */
#define DW_SPI_CTRL0 0x00
@@ -92,13 +93,11 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_master *master;
struct spi_device *cur_dev;
- struct device *parent_dev;
enum dw_ssi_type type;
char name[16];
void __iomem *regs;
unsigned long paddr;
- u32 iolen;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
@@ -106,14 +105,6 @@ struct dw_spi {
u16 bus_num;
u16 num_cs; /* supported slave numbers */
- /* Driver message queue */
- struct workqueue_struct *workqueue;
- struct work_struct pump_messages;
- spinlock_t lock;
- struct list_head queue;
- int busy;
- int run;
-
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
@@ -135,7 +126,6 @@ struct dw_spi {
u8 n_bytes; /* current is a 1/2 bytes op */
u8 max_bits_per_word; /* maxim is 16b */
u32 dma_width;
- int cs_change;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
void (*cs_control)(u32 command);
@@ -189,15 +179,20 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
dw_writel(dws, DW_SPI_BAUDR, div);
}
-static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
+static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi,
+ int active)
{
- if (cs > dws->num_cs)
- return;
+ u16 cs = spi->chip_select;
+ int gpio_val = active ? (spi->mode & SPI_CS_HIGH) :
+ !(spi->mode & SPI_CS_HIGH);
if (dws->cs_control)
- dws->cs_control(1);
+ dws->cs_control(active);
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_set_value(spi->cs_gpio, gpio_val);
- dw_writel(dws, DW_SPI_SER, 1 << cs);
+ if (active)
+ dw_writel(dws, DW_SPI_SER, 1 << cs);
}
/* Disable IRQ bits */
@@ -231,7 +226,7 @@ struct dw_spi_chip {
void (*cs_control)(u32 command);
};
-extern int dw_spi_add_host(struct dw_spi *dws);
+extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
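The reworked spi_chip_sel() above keys the GPIO level off SPI_CS_HIGH so that both active-low and active-high chip selects behave correctly. Stripped of the controller specifics, the GPIO part reduces to the following helper; this is a simplified sketch using the same legacy integer-GPIO API, and my_gpio_cs is an illustrative name:

#include <linux/gpio.h>
#include <linux/spi/spi.h>

static void my_gpio_cs(struct spi_device *spi, bool active)
{
        int level;

        if (!gpio_is_valid(spi->cs_gpio))
                return;

        /* active-high CS: drive 1 to select; active-low CS: drive 0 */
        if (spi->mode & SPI_CS_HIGH)
                level = active ? 1 : 0;
        else
                level = active ? 0 : 1;

        gpio_set_value(spi->cs_gpio, level);
}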
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
new file mode 100644
index 00000000000..be44a3eeb5e
--- /dev/null
+++ b/drivers/spi/spi-efm32.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2012-2013 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/efm32-spi.h>
+
+#define DRIVER_NAME "efm32-spi"
+
+#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
+
+#define REG_CTRL 0x00
+#define REG_CTRL_SYNC 0x0001
+#define REG_CTRL_CLKPOL 0x0100
+#define REG_CTRL_CLKPHA 0x0200
+#define REG_CTRL_MSBF 0x0400
+#define REG_CTRL_TXBIL 0x1000
+
+#define REG_FRAME 0x04
+#define REG_FRAME_DATABITS__MASK 0x000f
+#define REG_FRAME_DATABITS(n) ((n) - 3)
+
+#define REG_CMD 0x0c
+#define REG_CMD_RXEN 0x0001
+#define REG_CMD_RXDIS 0x0002
+#define REG_CMD_TXEN 0x0004
+#define REG_CMD_TXDIS 0x0008
+#define REG_CMD_MASTEREN 0x0010
+
+#define REG_STATUS 0x10
+#define REG_STATUS_TXENS 0x0002
+#define REG_STATUS_TXC 0x0020
+#define REG_STATUS_TXBL 0x0040
+#define REG_STATUS_RXDATAV 0x0080
+
+#define REG_CLKDIV 0x14
+
+#define REG_RXDATAX 0x18
+#define REG_RXDATAX_RXDATA__MASK 0x01ff
+#define REG_RXDATAX_PERR 0x4000
+#define REG_RXDATAX_FERR 0x8000
+
+#define REG_TXDATA 0x34
+
+#define REG_IF 0x40
+#define REG_IF_TXBL 0x0002
+#define REG_IF_RXDATAV 0x0004
+
+#define REG_IFS 0x44
+#define REG_IFC 0x48
+#define REG_IEN 0x4c
+
+#define REG_ROUTE 0x54
+#define REG_ROUTE_RXPEN 0x0001
+#define REG_ROUTE_TXPEN 0x0002
+#define REG_ROUTE_CLKPEN 0x0008
+#define REG_ROUTE_LOCATION__MASK 0x0700
+#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
+
+struct efm32_spi_ddata {
+ struct spi_bitbang bitbang;
+
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int rxirq, txirq;
+ struct efm32_spi_pdata pdata;
+
+ /* irq data */
+ struct completion done;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned tx_len, rx_len;
+
+ /* chip selects */
+ unsigned csgpio[];
+};
+
+#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
+#define efm32_spi_vdbg(ddata, format, arg...) \
+ dev_vdbg(ddata_to_dev(ddata), format, ##arg)
+
+static void efm32_spi_write32(struct efm32_spi_ddata *ddata,
+ u32 value, unsigned offset)
+{
+ writel_relaxed(value, ddata->base + offset);
+}
+
+static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
+{
+ return readl_relaxed(ddata->base + offset);
+}
+
+static void efm32_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+ int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE);
+
+ gpio_set_value(ddata->csgpio[spi->chip_select], value);
+}
+
+static int efm32_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+
+ unsigned bpw = t->bits_per_word ?: spi->bits_per_word;
+ unsigned speed = t->speed_hz ?: spi->max_speed_hz;
+ unsigned long clkfreq = clk_get_rate(ddata->clk);
+ u32 clkdiv;
+
+ efm32_spi_write32(ddata, REG_CTRL_SYNC | REG_CTRL_MSBF |
+ (spi->mode & SPI_CPHA ? REG_CTRL_CLKPHA : 0) |
+ (spi->mode & SPI_CPOL ? REG_CTRL_CLKPOL : 0), REG_CTRL);
+
+ efm32_spi_write32(ddata,
+ REG_FRAME_DATABITS(bpw), REG_FRAME);
+
+ if (2 * speed >= clkfreq)
+ clkdiv = 0;
+ else
+ clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4);
+
+ if (clkdiv > (1U << 21))
+ return -EINVAL;
+
+ efm32_spi_write32(ddata, clkdiv, REG_CLKDIV);
+ efm32_spi_write32(ddata, REG_CMD_MASTEREN, REG_CMD);
+ efm32_spi_write32(ddata, REG_CMD_RXEN | REG_CMD_TXEN, REG_CMD);
+
+ return 0;
+}
+
+static void efm32_spi_tx_u8(struct efm32_spi_ddata *ddata)
+{
+ u8 val = 0;
+
+ if (ddata->tx_buf) {
+ val = *ddata->tx_buf;
+ ddata->tx_buf++;
+ }
+
+ ddata->tx_len--;
+ efm32_spi_write32(ddata, val, REG_TXDATA);
+ efm32_spi_vdbg(ddata, "%s: tx 0x%x\n", __func__, val);
+}
+
+static void efm32_spi_rx_u8(struct efm32_spi_ddata *ddata)
+{
+ u32 rxdata = efm32_spi_read32(ddata, REG_RXDATAX);
+ efm32_spi_vdbg(ddata, "%s: rx 0x%x\n", __func__, rxdata);
+
+ if (ddata->rx_buf) {
+ *ddata->rx_buf = rxdata;
+ ddata->rx_buf++;
+ }
+
+ ddata->rx_len--;
+}
+
+static void efm32_spi_filltx(struct efm32_spi_ddata *ddata)
+{
+ while (ddata->tx_len &&
+ ddata->tx_len + 2 > ddata->rx_len &&
+ efm32_spi_read32(ddata, REG_STATUS) & REG_STATUS_TXBL) {
+ efm32_spi_tx_u8(ddata);
+ }
+}
+
+static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+ int ret = -EBUSY;
+
+ spin_lock_irq(&ddata->lock);
+
+ if (ddata->tx_buf || ddata->rx_buf)
+ goto out_unlock;
+
+ ddata->tx_buf = t->tx_buf;
+ ddata->rx_buf = t->rx_buf;
+ ddata->tx_len = ddata->rx_len =
+ t->len * DIV_ROUND_UP(t->bits_per_word, 8);
+
+ efm32_spi_filltx(ddata);
+
+ reinit_completion(&ddata->done);
+
+ efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
+
+ spin_unlock_irq(&ddata->lock);
+
+ wait_for_completion(&ddata->done);
+
+ spin_lock_irq(&ddata->lock);
+
+ ret = t->len - max(ddata->tx_len, ddata->rx_len);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ ddata->tx_buf = ddata->rx_buf = NULL;
+
+out_unlock:
+ spin_unlock_irq(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_rxirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&ddata->lock);
+
+ while (ddata->rx_len > 0 &&
+ efm32_spi_read32(ddata, REG_STATUS) &
+ REG_STATUS_RXDATAV) {
+ efm32_spi_rx_u8(ddata);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (!ddata->rx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_RXDATAV;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+
+ complete(&ddata->done);
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_txirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+
+ efm32_spi_vdbg(ddata,
+ "%s: txlen = %u, rxlen = %u, if=0x%08x, stat=0x%08x\n",
+ __func__, ddata->tx_len, ddata->rx_len,
+ efm32_spi_read32(ddata, REG_IF),
+ efm32_spi_read32(ddata, REG_STATUS));
+
+ spin_lock(&ddata->lock);
+
+ efm32_spi_filltx(ddata);
+
+ efm32_spi_vdbg(ddata, "%s: txlen = %u, rxlen = %u\n",
+ __func__, ddata->tx_len, ddata->rx_len);
+
+ if (!ddata->tx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_TXBL;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+ efm32_spi_vdbg(ddata, "disable TXBL\n");
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return IRQ_HANDLED;
+}
+
+static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
+{
+ u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
+
+ return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
+}
+
+static void efm32_spi_probe_dt(struct platform_device *pdev,
+ struct spi_master *master, struct efm32_spi_ddata *ddata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 location;
+ int ret;
+
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
+ if (!ret) {
+ dev_dbg(&pdev->dev, "using location %u\n", location);
+ } else {
+ /* default to location configured in hardware */
+ location = efm32_spi_get_configured_location(ddata);
+
+ dev_info(&pdev->dev, "fall back to location %u\n", location);
+ }
+
+ ddata->pdata.location = location;
+}
+
+static int efm32_spi_probe(struct platform_device *pdev)
+{
+ struct efm32_spi_ddata *ddata;
+ struct resource *res;
+ int ret;
+ struct spi_master *master;
+ struct device_node *np = pdev->dev.of_node;
+ int num_cs, i;
+
+ if (!np)
+ return -EINVAL;
+
+ num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(*ddata) + num_cs * sizeof(unsigned));
+ if (!master) {
+ dev_dbg(&pdev->dev,
+ "failed to allocate spi master controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
+ ddata = spi_master_get_devdata(master);
+
+ ddata->bitbang.master = master;
+ ddata->bitbang.chipselect = efm32_spi_chipselect;
+ ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
+ ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
+
+ spin_lock_init(&ddata->lock);
+ init_completion(&ddata->done);
+
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ ret = PTR_ERR(ddata->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < num_cs; ++i) {
+ ret = of_get_named_gpio(np, "cs-gpios", i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ ddata->csgpio[i] = ret;
+ dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]);
+ ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i],
+ GPIOF_OUT_INIT_LOW, DRIVER_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to configure csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ goto err;
+ }
+
+ if (resource_size(res) < 0x60) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "memory resource too small\n");
+ goto err;
+ }
+
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base)) {
+ ret = PTR_ERR(ddata->base);
+ goto err;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
+ goto err;
+ }
+
+ ddata->rxirq = ret;
+
+ ret = platform_get_irq(pdev, 1);
+ if (ret <= 0)
+ ret = ddata->rxirq + 1;
+
+ ddata->txirq = ret;
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
+ goto err;
+ }
+
+ efm32_spi_probe_dt(pdev, master, ddata);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
+ REG_ROUTE_CLKPEN |
+ REG_ROUTE_LOCATION(ddata->pdata.location), REG_ROUTE);
+
+ ret = request_irq(ddata->rxirq, efm32_spi_rxirq,
+ 0, DRIVER_NAME " rx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rxirq (%d)\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = request_irq(ddata->txirq, efm32_spi_txirq,
+ 0, DRIVER_NAME " tx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register txirq (%d)\n", ret);
+ goto err_free_rx_irq;
+ }
+
+ ret = spi_bitbang_start(&ddata->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed (%d)\n", ret);
+
+ free_irq(ddata->txirq, ddata);
+err_free_rx_irq:
+ free_irq(ddata->rxirq, ddata);
+err_disable_clk:
+ clk_disable_unprepare(ddata->clk);
+err:
+ spi_master_put(master);
+ }
+
+ return ret;
+}
+
+static int efm32_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&ddata->bitbang);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+
+ free_irq(ddata->txirq, ddata);
+ free_irq(ddata->rxirq, ddata);
+ clk_disable_unprepare(ddata->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id efm32_spi_dt_ids[] = {
+ {
+ .compatible = "energymicro,efm32-spi",
+ }, {
+ /* doesn't follow the "vendor,device" scheme, don't use */
+ .compatible = "efm32,spi",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, efm32_spi_dt_ids);
+
+static struct platform_driver efm32_spi_driver = {
+ .probe = efm32_spi_probe,
+ .remove = efm32_spi_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = efm32_spi_dt_ids,
+ },
+};
+module_platform_driver(efm32_spi_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("EFM32 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
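The divider math in efm32_spi_setup_transfer() above is compact; isolated, it reads as follows, with a worked value for a 48 MHz peripheral clock. The helper name and the standalone rounding macro are only for illustration; the meaning of the resulting register value is defined by the hardware, not restated here.

#define MY_DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))

/* returns the CLKDIV value, 0 for "run at maximum rate", or -EINVAL */
static long my_efm32_clkdiv(unsigned long clkfreq, unsigned int speed)
{
        unsigned long clkdiv;

        if (2UL * speed >= clkfreq)
                return 0;               /* requested rate is at the limit */

        clkdiv = 64 * (MY_DIV_ROUND_UP(2 * clkfreq, speed) - 4);
        if (clkdiv > (1UL << 21))
                return -EINVAL;         /* requested rate too low */

        /* e.g. clkfreq = 48 MHz, speed = 1 MHz: 64 * (96 - 4) = 5888 */
        return clkdiv;
}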
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 3a219599612..2f675d32df0 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
@@ -70,19 +69,11 @@
/**
* struct ep93xx_spi - EP93xx SPI controller structure
- * @lock: spinlock that protects concurrent accesses to fields @running,
- * @current_msg and @msg_queue
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @min_rate: minimum clock rate (in Hz) supported by the controller
- * @max_rate: maximum clock rate (in Hz) supported by the controller
- * @running: is the queue running
- * @wq: workqueue used by the driver
- * @msg_work: work that is queued for the driver
* @wait: wait here until given transfer is completed
- * @msg_queue: queue for the messages
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
* @rx: current byte in transfer to receive
@@ -96,30 +87,13 @@
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
* the client
- *
- * This structure holds EP93xx SPI controller specific information. When
- * @running is %true, driver accepts transfer requests from protocol drivers.
- * @current_msg is used to hold pointer to the message that is currently
- * processed. If @current_msg is %NULL, it means that no processing is going
- * on.
- *
- * Most of the fields are only written once and they can be accessed without
- * taking the @lock. Fields that are accessed concurrently are: @current_msg,
- * @running, and @msg_queue.
*/
struct ep93xx_spi {
- spinlock_t lock;
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- unsigned long min_rate;
- unsigned long max_rate;
- bool running;
- struct workqueue_struct *wq;
- struct work_struct msg_work;
struct completion wait;
- struct list_head msg_queue;
struct spi_message *current_msg;
size_t tx;
size_t rx;
@@ -136,50 +110,36 @@ struct ep93xx_spi {
/**
* struct ep93xx_spi_chip - SPI device hardware settings
* @spi: back pointer to the SPI device
- * @rate: max rate in hz this chip supports
- * @div_cpsr: cpsr (pre-scaler) divider
- * @div_scr: scr divider
- * @dss: bits per word (4 - 16 bits)
* @ops: private chip operations
- *
- * This structure is used to store hardware register specific settings for each
- * SPI device. Settings are written to hardware by function
- * ep93xx_spi_chip_setup().
*/
struct ep93xx_spi_chip {
const struct spi_device *spi;
- unsigned long rate;
- u8 div_cpsr;
- u8 div_scr;
- u8 dss;
struct ep93xx_spi_chip_ops *ops;
};
/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw) ((bpw) - 1)
-static inline void
-ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
+static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
+ u16 reg, u8 value)
{
- __raw_writeb(value, espi->regs_base + reg);
+ writeb(value, espi->regs_base + reg);
}
-static inline u8
-ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
+static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
- return __raw_readb(spi->regs_base + reg);
+ return readb(spi->regs_base + reg);
}
-static inline void
-ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
+static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
+ u16 reg, u16 value)
{
- __raw_writew(value, espi->regs_base + reg);
+ writew(value, espi->regs_base + reg);
}
-static inline u16
-ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
+static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
- return __raw_readw(spi->regs_base + reg);
+ return readw(spi->regs_base + reg);
}
static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
@@ -230,27 +190,23 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
/**
* ep93xx_spi_calc_divisors() - calculates SPI clock divisors
* @espi: ep93xx SPI controller struct
- * @chip: divisors are calculated for this chip
* @rate: desired SPI output clock rate
- *
- * Function calculates cpsr (clock pre-scaler) and scr divisors based on
- * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
- * for some reason, divisors cannot be calculated nothing is stored and
- * %-EINVAL is returned.
+ * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
+ * @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
- struct ep93xx_spi_chip *chip,
- unsigned long rate)
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
{
+ struct spi_master *master = platform_get_drvdata(espi->pdev);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
/*
* Make sure that max value is between values supported by the
* controller. Note that minimum value is already checked in
- * ep93xx_spi_transfer().
+ * ep93xx_spi_transfer_one_message().
*/
- rate = clamp(rate, espi->min_rate, espi->max_rate);
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
/*
* Calculate divisors so that we can get speed according the
@@ -263,8 +219,8 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
for (cpsr = 2; cpsr <= 254; cpsr += 2) {
for (scr = 0; scr <= 255; scr++) {
if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
- chip->div_scr = (u8)scr;
- chip->div_cpsr = (u8)cpsr;
+ *div_scr = (u8)scr;
+ *div_cpsr = (u8)cpsr;
return 0;
}
}
@@ -296,12 +252,6 @@ static int ep93xx_spi_setup(struct spi_device *spi)
struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
struct ep93xx_spi_chip *chip;
- if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
- dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
- spi->bits_per_word);
- return -EINVAL;
- }
-
chip = spi_get_ctldata(spi);
if (!chip) {
dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
@@ -325,77 +275,11 @@ static int ep93xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
}
- if (spi->max_speed_hz != chip->rate) {
- int err;
-
- err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
- if (err != 0) {
- spi_set_ctldata(spi, NULL);
- kfree(chip);
- return err;
- }
- chip->rate = spi->max_speed_hz;
- }
-
- chip->dss = bits_per_word_to_dss(spi->bits_per_word);
-
ep93xx_spi_cs_control(spi, false);
return 0;
}
/**
- * ep93xx_spi_transfer() - queue message to be transferred
- * @spi: target SPI device
- * @msg: message to be transferred
- *
- * This function is called by SPI device drivers when they are going to transfer
- * a new message. It simply puts the message in the queue and schedules
- * workqueue to perform the actual transfer later on.
- *
- * Returns %0 on success and negative error in case of failure.
- */
-static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
-{
- struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
- struct spi_transfer *t;
- unsigned long flags;
-
- if (!msg || !msg->complete)
- return -EINVAL;
-
- /* first validate each transfer */
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- if (t->bits_per_word) {
- if (t->bits_per_word < 4 || t->bits_per_word > 16)
- return -EINVAL;
- }
- if (t->speed_hz && t->speed_hz < espi->min_rate)
- return -EINVAL;
- }
-
- /*
- * Now that we own the message, let's initialize it so that it is
- * suitable for us. We use @msg->status to signal whether there was
- * error in transfer and @msg->state is used to hold pointer to the
- * current transfer (or %NULL if no active current transfer).
- */
- msg->state = NULL;
- msg->status = 0;
- msg->actual_length = 0;
-
- spin_lock_irqsave(&espi->lock, flags);
- if (!espi->running) {
- spin_unlock_irqrestore(&espi->lock, flags);
- return -ESHUTDOWN;
- }
- list_add_tail(&msg->queue, &espi->msg_queue);
- queue_work(espi->wq, &espi->msg_work);
- spin_unlock_irqrestore(&espi->lock, flags);
-
- return 0;
-}
-
-/**
* ep93xx_spi_cleanup() - cleans up master controller specific state
* @spi: SPI device to cleanup
*
@@ -419,39 +303,40 @@ static void ep93xx_spi_cleanup(struct spi_device *spi)
* ep93xx_spi_chip_setup() - configures hardware according to given @chip
* @espi: ep93xx SPI controller struct
* @chip: chip specific settings
- *
- * This function sets up the actual hardware registers with settings given in
- * @chip. Note that no validation is done so make sure that callers validate
- * settings before calling this.
+ * @speed_hz: transfer speed
+ * @bits_per_word: transfer bits_per_word
*/
-static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
- const struct ep93xx_spi_chip *chip)
+static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
+ const struct ep93xx_spi_chip *chip,
+ u32 speed_hz, u8 bits_per_word)
{
+ u8 dss = bits_per_word_to_dss(bits_per_word);
+ u8 div_cpsr = 0;
+ u8 div_scr = 0;
u16 cr0;
+ int err;
- cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
+ err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
+ if (err)
+ return err;
+
+ cr0 = div_scr << SSPCR0_SCR_SHIFT;
cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
- cr0 |= chip->dss;
+ cr0 |= dss;
dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
- chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
- dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
+ chip->spi->mode, div_cpsr, div_scr, dss);
+ dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
- ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
+ ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
ep93xx_spi_write_u16(espi, SSPCR0, cr0);
-}
-static inline int bits_per_word(const struct ep93xx_spi *espi)
-{
- struct spi_message *msg = espi->current_msg;
- struct spi_transfer *t = msg->state;
-
- return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
+ return 0;
}
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
- if (bits_per_word(espi) > 8) {
+ if (t->bits_per_word > 8) {
u16 tx_val = 0;
if (t->tx_buf)
@@ -470,7 +355,7 @@ static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
- if (bits_per_word(espi) > 8) {
+ if (t->bits_per_word > 8) {
u16 rx_val;
rx_val = ep93xx_spi_read_u16(espi, SSPDR);
@@ -556,7 +441,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
size_t len = t->len;
int i, ret, nents;
- if (bits_per_word(espi) > 8)
+ if (t->bits_per_word > 8)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -620,7 +505,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
}
if (WARN_ON(len)) {
- dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
return ERR_PTR(-EINVAL);
}
@@ -718,37 +603,16 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
struct spi_transfer *t)
{
struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
+ int err;
msg->state = t;
- /*
- * Handle any transfer specific settings if needed. We use
- * temporary chip settings here and restore original later when
- * the transfer is finished.
- */
- if (t->speed_hz || t->bits_per_word) {
- struct ep93xx_spi_chip tmp_chip = *chip;
-
- if (t->speed_hz) {
- int err;
-
- err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
- t->speed_hz);
- if (err) {
- dev_err(&espi->pdev->dev,
- "failed to adjust speed\n");
- msg->status = err;
- return;
- }
- }
-
- if (t->bits_per_word)
- tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);
-
- /*
- * Set up temporary new hw settings for this transfer.
- */
- ep93xx_spi_chip_setup(espi, &tmp_chip);
+ err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
+ if (err) {
+ dev_err(&espi->pdev->dev,
+ "failed to setup chip for transfer\n");
+ msg->status = err;
+ return;
}
espi->rx = 0;
@@ -793,9 +657,6 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
ep93xx_spi_cs_control(msg->spi, true);
}
}
-
- if (t->speed_hz || t->bits_per_word)
- ep93xx_spi_chip_setup(espi, chip);
}
/*
@@ -848,10 +709,8 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
espi->fifo_level = 0;
/*
- * Update SPI controller registers according to spi device and assert
- * the chipselect.
+ * Assert the chipselect.
*/
- ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
ep93xx_spi_cs_control(msg->spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
@@ -868,50 +727,22 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
ep93xx_spi_disable(espi);
}
-#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
-
-/**
- * ep93xx_spi_work() - EP93xx SPI workqueue worker function
- * @work: work struct
- *
- * Workqueue worker function. This function is called when there are new
- * SPI messages to be processed. Message is taken out from the queue and then
- * passed to ep93xx_spi_process_message().
- *
- * After message is transferred, protocol driver is notified by calling
- * @msg->complete(). In case of error, @msg->status is set to negative error
- * number, otherwise it contains zero (and @msg->actual_length is updated).
- */
-static void ep93xx_spi_work(struct work_struct *work)
+static int ep93xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct ep93xx_spi *espi = work_to_espi(work);
- struct spi_message *msg;
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
- spin_lock_irq(&espi->lock);
- if (!espi->running || espi->current_msg ||
- list_empty(&espi->msg_queue)) {
- spin_unlock_irq(&espi->lock);
- return;
- }
- msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
- list_del_init(&msg->queue);
- espi->current_msg = msg;
- spin_unlock_irq(&espi->lock);
+ msg->state = NULL;
+ msg->status = 0;
+ msg->actual_length = 0;
+ espi->current_msg = msg;
ep93xx_spi_process_message(espi, msg);
-
- /*
- * Update the current message and re-schedule ourselves if there are
- * more messages in the queue.
- */
- spin_lock_irq(&espi->lock);
espi->current_msg = NULL;
- if (espi->running && !list_empty(&espi->msg_queue))
- queue_work(espi->wq, &espi->msg_work);
- spin_unlock_irq(&espi->lock);
- /* notify the protocol driver that we are done with this message */
- msg->complete(msg->context);
+ spi_finalize_current_message(master);
+
+ return 0;
}
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
@@ -1023,7 +854,7 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
free_page((unsigned long)espi->zeropage);
}
-static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
+static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ep93xx_spi_info *info;
@@ -1032,92 +863,78 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
int irq;
int error;
- info = pdev->dev.platform_data;
+ info = dev_get_platdata(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resources\n");
+ return -EBUSY;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get iomem resource\n");
+ return -ENODEV;
+ }
master = spi_alloc_master(&pdev->dev, sizeof(*espi));
- if (!master) {
- dev_err(&pdev->dev, "failed to allocate spi master\n");
+ if (!master)
return -ENOMEM;
- }
master->setup = ep93xx_spi_setup;
- master->transfer = ep93xx_spi_transfer;
+ master->transfer_one_message = ep93xx_spi_transfer_one_message;
master->cleanup = ep93xx_spi_cleanup;
master->bus_num = pdev->id;
master->num_chipselect = info->num_chipselect;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
platform_set_drvdata(pdev, master);
espi = spi_master_get_devdata(master);
- espi->clk = clk_get(&pdev->dev, NULL);
+ espi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(espi->clk)) {
dev_err(&pdev->dev, "unable to get spi clock\n");
error = PTR_ERR(espi->clk);
goto fail_release_master;
}
- spin_lock_init(&espi->lock);
init_completion(&espi->wait);
/*
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
- espi->max_rate = clk_get_rate(espi->clk) / 2;
- espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- error = -EBUSY;
- dev_err(&pdev->dev, "failed to get irq resources\n");
- goto fail_put_clock;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "unable to get iomem resource\n");
- error = -ENODEV;
- goto fail_put_clock;
- }
-
espi->sspdr_phys = res->start + SSPDR;
- espi->regs_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!espi->regs_base) {
- dev_err(&pdev->dev, "failed to map resources\n");
- error = -ENODEV;
- goto fail_put_clock;
+ espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(espi->regs_base)) {
+ error = PTR_ERR(espi->regs_base);
+ goto fail_release_master;
}
error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
0, "ep93xx-spi", espi);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto fail_put_clock;
+ goto fail_release_master;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
- espi->wq = create_singlethread_workqueue("ep93xx_spid");
- if (!espi->wq) {
- dev_err(&pdev->dev, "unable to create workqueue\n");
- goto fail_free_dma;
- }
- INIT_WORK(&espi->msg_work, ep93xx_spi_work);
- INIT_LIST_HEAD(&espi->msg_queue);
- espi->running = true;
-
/* make sure that the hardware is disabled */
ep93xx_spi_write_u8(espi, SSPCR1, 0);
- error = spi_register_master(master);
+ error = devm_spi_register_master(&pdev->dev, master);
if (error) {
dev_err(&pdev->dev, "failed to register SPI master\n");
- goto fail_free_queue;
+ goto fail_free_dma;
}
dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
@@ -1125,52 +942,21 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
return 0;
-fail_free_queue:
- destroy_workqueue(espi->wq);
fail_free_dma:
ep93xx_spi_release_dma(espi);
-fail_put_clock:
- clk_put(espi->clk);
fail_release_master:
spi_master_put(master);
- platform_set_drvdata(pdev, NULL);
return error;
}
-static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
+static int ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- spin_lock_irq(&espi->lock);
- espi->running = false;
- spin_unlock_irq(&espi->lock);
-
- destroy_workqueue(espi->wq);
-
- /*
- * Complete remaining messages with %-ESHUTDOWN status.
- */
- spin_lock_irq(&espi->lock);
- while (!list_empty(&espi->msg_queue)) {
- struct spi_message *msg;
-
- msg = list_first_entry(&espi->msg_queue,
- struct spi_message, queue);
- list_del_init(&msg->queue);
- msg->status = -ESHUTDOWN;
- spin_unlock_irq(&espi->lock);
- msg->complete(msg->context);
- spin_lock_irq(&espi->lock);
- }
- spin_unlock_irq(&espi->lock);
-
ep93xx_spi_release_dma(espi);
- clk_put(espi->clk);
- platform_set_drvdata(pdev, NULL);
- spi_unregister_master(master);
return 0;
}
@@ -1180,7 +966,7 @@ static struct platform_driver ep93xx_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_spi_probe,
- .remove = __devexit_p(ep93xx_spi_remove),
+ .remove = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);
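The divisor search in ep93xx_spi_calc_divisors() walks even prescaler values and takes the first (cpsr, scr) pair whose output clock does not exceed the requested rate, where the output clock is sspclk / (cpsr * (scr + 1)). Stripped of the driver context it is just the nested loop below (my_ep93xx_divisors is an illustrative name; sspclk is the SSP input clock in Hz):

#include <linux/errno.h>
#include <linux/types.h>

static int my_ep93xx_divisors(unsigned long sspclk, unsigned long rate,
                              u8 *div_cpsr, u8 *div_scr)
{
        int cpsr, scr;

        /* cpsr must be even and in 2..254; scr is 0..255 */
        for (cpsr = 2; cpsr <= 254; cpsr += 2) {
                for (scr = 0; scr <= 255; scr++) {
                        if (sspclk / (cpsr * (scr + 1)) <= rate) {
                                *div_cpsr = (u8)cpsr;
                                *div_scr = (u8)scr;
                                return 0;
                        }
                }
        }

        return -EINVAL;         /* rate lower than the hardware can reach */
}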
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 8f6aa735a24..ba441ad9a00 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -11,7 +11,6 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
-#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -312,9 +311,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
unsigned int i;
unsigned long flags;
- if (spi->chip_select > 0)
- return -ENODEV;
-
spin_lock_irqsave(&ebu_lock, flags);
if (spi->max_speed_hz >= CLOCK_100M) {
@@ -398,12 +394,12 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
}
m->status = ret;
- m->complete(m->context);
+ spi_finalize_current_message(master);
return 0;
}
-static int __devinit falcon_sflash_probe(struct platform_device *pdev)
+static int falcon_sflash_probe(struct platform_device *pdev)
{
struct falcon_sflash *priv;
struct spi_master *master;
@@ -422,8 +418,7 @@ static int __devinit falcon_sflash_probe(struct platform_device *pdev)
priv->master = master;
master->mode_bits = SPI_MODE_3;
- master->num_chipselect = 1;
- master->bus_num = -1;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
master->transfer_one_message = falcon_sflash_xfer_one;
@@ -432,21 +427,12 @@ static int __devinit falcon_sflash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
spi_master_put(master);
return ret;
}
-static int __devexit falcon_sflash_remove(struct platform_device *pdev)
-{
- struct falcon_sflash *priv = platform_get_drvdata(pdev);
-
- spi_unregister_master(priv->master);
-
- return 0;
-}
-
static const struct of_device_id falcon_sflash_match[] = {
{ .compatible = "lantiq,sflash-falcon" },
{},
@@ -455,7 +441,6 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
- .remove = __devexit_p(falcon_sflash_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
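falcon_sflash_xfer_one() now ends with spi_finalize_current_message() instead of calling m->complete() directly; the core then runs the completion and feeds the next queued message to the driver. The general shape of a transfer_one_message() implementation is roughly the skeleton below (not the Falcon code; the transfer loop body is elided):

#include <linux/list.h>
#include <linux/spi/spi.h>

static int my_transfer_one_message(struct spi_master *master,
                                   struct spi_message *m)
{
        struct spi_transfer *t;
        int ret = 0;

        list_for_each_entry(t, &m->transfers, transfer_list) {
                /* push t->tx_buf / fill t->rx_buf here */
                m->actual_length += t->len;
        }

        m->status = ret;
        /* hand the finished message back; the core calls m->complete() */
        spi_finalize_current_message(master);

        return ret;
}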
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
new file mode 100644
index 00000000000..54b06376f03
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -0,0 +1,388 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <asm/cpm.h>
+#include <asm/qe.h>
+
+#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
+
+/* CPM1 and CPM2 are mutually exclusive. */
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
+#else
+#include <asm/cpm2.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
+#endif
+
+#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
+#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+
+/* SPCOM register values */
+#define SPCOM_STR (1 << 23) /* Start transmit */
+
+#define SPI_PRAM_SIZE 0x100
+#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+
+static void *fsl_dummy_rx;
+static DEFINE_MUTEX(fsl_dummy_rx_lock);
+static int fsl_dummy_rx_refcnt;
+
+void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
+{
+ if (mspi->flags & SPI_QE) {
+ qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
+ if (mspi->flags & SPI_CPM1) {
+ out_be16(&mspi->pram->rbptr,
+ in_be16(&mspi->pram->rbase));
+ out_be16(&mspi->pram->tbptr,
+ in_be16(&mspi->pram->tbase));
+ }
+ }
+}
+
+static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
+{
+ struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
+ unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
+ unsigned int xfer_ofs;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
+
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ out_be16(&rx_bd->cbd_datlen, 0);
+ out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ out_be16(&tx_bd->cbd_datlen, xfer_len);
+ out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
+ BD_SC_LAST);
+
+ /* start transfer */
+ mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
+}
+
+int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped)
+{
+ struct device *dev = mspi->dev;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ if (is_dma_mapped) {
+ mspi->map_tx_dma = 0;
+ mspi->map_rx_dma = 0;
+ } else {
+ mspi->map_tx_dma = 1;
+ mspi->map_rx_dma = 1;
+ }
+
+ if (!t->tx_buf) {
+ mspi->tx_dma = mspi->dma_dummy_tx;
+ mspi->map_tx_dma = 0;
+ }
+
+ if (!t->rx_buf) {
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
+
+ if (mspi->map_tx_dma) {
+ void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+
+ mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->tx_dma)) {
+ dev_err(dev, "unable to map tx dma\n");
+ return -ENOMEM;
+ }
+ } else if (t->tx_buf) {
+ mspi->tx_dma = t->tx_dma;
+ }
+
+ if (mspi->map_rx_dma) {
+ mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->rx_dma)) {
+ dev_err(dev, "unable to map rx dma\n");
+ goto err_rx_dma;
+ }
+ } else if (t->rx_buf) {
+ mspi->rx_dma = t->rx_dma;
+ }
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
+
+ mspi->xfer_in_progress = t;
+ mspi->count = t->len;
+
+ /* start CPM transfers */
+ fsl_spi_cpm_bufs_start(mspi);
+
+ return 0;
+
+err_rx_dma:
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct spi_transfer *t = mspi->xfer_in_progress;
+
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
+}
+
+void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ u16 len;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
+ in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+
+ len = in_be16(&mspi->rx_bd->cbd_datlen);
+ if (len > mspi->count) {
+ WARN_ON(1);
+ len = mspi->count;
+ }
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= len;
+ if (mspi->count)
+ fsl_spi_cpm_bufs_start(mspi);
+ else
+ complete(&mspi->done);
+}
+
+static void *fsl_spi_alloc_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ if (!fsl_dummy_rx)
+ fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
+ if (fsl_dummy_rx)
+ fsl_dummy_rx_refcnt++;
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+
+ return fsl_dummy_rx;
+}
+
+static void fsl_spi_free_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ switch (fsl_dummy_rx_refcnt) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1:
+ kfree(fsl_dummy_rx);
+ fsl_dummy_rx = NULL;
+ /* fall through */
+ default:
+ fsl_dummy_rx_refcnt--;
+ break;
+ }
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+}
+
+static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ void __iomem *spi_base;
+ unsigned long pram_ofs = -ENOMEM;
+
+ /* Can't use of_address_to_resource(), QE muram isn't at 0. */
+ iprop = of_get_property(np, "reg", &size);
+
+ /* QE with a fixed pram location? */
+ if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
+ return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
+
+ /* QE but with a dynamic pram location? */
+ if (mspi->flags & SPI_QE) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
+ return pram_ofs;
+ }
+
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
+
+ if (mspi->flags & SPI_CPM2) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ } else {
+ struct spi_pram __iomem *pram = spi_base;
+ u16 rpbase = in_be16(&pram->rpbase);
+
+ /* Microcode relocation patch applied? */
+ if (rpbase) {
+ pram_ofs = rpbase;
+ } else {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
+ }
+
+ iounmap(spi_base);
+ return pram_ofs;
+}
+
+int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ unsigned long pram_ofs;
+ unsigned long bds_ofs;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return 0;
+
+ if (!fsl_spi_alloc_dummy_rx())
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_QE) {
+ iprop = of_get_property(np, "cell-index", &size);
+ if (iprop && size == sizeof(*iprop))
+ mspi->subblock = *iprop;
+
+ switch (mspi->subblock) {
+ default:
+ dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
+ /* fall through */
+ case 0:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+ break;
+ case 1:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI2;
+ break;
+ }
+ }
+
+ pram_ofs = fsl_spi_cpm_get_pram(mspi);
+ if (IS_ERR_VALUE(pram_ofs)) {
+ dev_err(dev, "can't allocate spi parameter ram\n");
+ goto err_pram;
+ }
+
+ bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
+ sizeof(*mspi->rx_bd), 8);
+ if (IS_ERR_VALUE(bds_ofs)) {
+ dev_err(dev, "can't allocate bds\n");
+ goto err_bds;
+ }
+
+ mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
+ dev_err(dev, "unable to map dummy tx buffer\n");
+ goto err_dummy_tx;
+ }
+
+ mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
+ dev_err(dev, "unable to map dummy rx buffer\n");
+ goto err_dummy_rx;
+ }
+
+ mspi->pram = cpm_muram_addr(pram_ofs);
+
+ mspi->tx_bd = cpm_muram_addr(bds_ofs);
+ mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
+
+ /* Initialize parameter ram. */
+ out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
+ out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
+ out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_be16(&mspi->pram->mrblr, SPI_MRBLR);
+ out_be32(&mspi->pram->rstate, 0);
+ out_be32(&mspi->pram->rdp, 0);
+ out_be16(&mspi->pram->rbptr, 0);
+ out_be16(&mspi->pram->rbc, 0);
+ out_be32(&mspi->pram->rxtmp, 0);
+ out_be32(&mspi->pram->tstate, 0);
+ out_be32(&mspi->pram->tdp, 0);
+ out_be16(&mspi->pram->tbptr, 0);
+ out_be16(&mspi->pram->tbc, 0);
+ out_be32(&mspi->pram->txtmp, 0);
+
+ return 0;
+
+err_dummy_rx:
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+err_dummy_tx:
+ cpm_muram_free(bds_ofs);
+err_bds:
+ cpm_muram_free(pram_ofs);
+err_pram:
+ fsl_spi_free_dummy_rx();
+ return -ENOMEM;
+}
+
+void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
+ dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+ cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+ fsl_spi_free_dummy_rx();
+}
diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h
new file mode 100644
index 00000000000..c7111580548
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.h
@@ -0,0 +1,43 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_CPM_H__
+#define __SPI_FSL_CPM_H__
+
+#include "spi-fsl-lib.h"
+
+#ifdef CONFIG_FSL_SOC
+extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi);
+extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped);
+extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events);
+extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi);
+#else
+static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { }
+static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t,
+ bool is_dma_mapped) { return 0; }
+static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { }
+static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { }
+static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; }
+static inline void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { }
+#endif
+
+#endif /* __SPI_FSL_CPM_H__ */
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
new file mode 100644
index 00000000000..5021ddf03f6
--- /dev/null
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -0,0 +1,574 @@
+/*
+ * drivers/spi/spi-fsl-dspi.c
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Freescale DSPI driver
+ * This file contains a driver for the Freescale DSPI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define DRIVER_NAME "fsl-dspi"
+
+#define TRAN_STATE_RX_VOID 0x01
+#define TRAN_STATE_TX_VOID 0x02
+#define TRAN_STATE_WORD_ODD_NUM 0x04
+
+#define DSPI_FIFO_SIZE 4
+
+#define SPI_MCR 0x00
+#define SPI_MCR_MASTER (1 << 31)
+#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_CLR_TXF (1 << 11)
+#define SPI_MCR_CLR_RXF (1 << 10)
+
+#define SPI_TCR 0x08
+
+#define SPI_CTAR(x)			(0x0c + ((x) * 4))
+#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
+#define SPI_CTAR_CPOL(x) ((x) << 26)
+#define SPI_CTAR_CPHA(x) ((x) << 25)
+#define SPI_CTAR_LSBFE(x) ((x) << 24)
+#define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22)
+#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
+#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
+#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
+#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
+#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
+#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
+#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
+
+#define SPI_CTAR0_SLAVE 0x0c
+
+#define SPI_SR 0x2c
+#define SPI_SR_EOQF 0x10000000
+
+#define SPI_RSER 0x30
+#define SPI_RSER_EOQFE 0x10000000
+
+#define SPI_PUSHR 0x34
+#define SPI_PUSHR_CONT (1 << 31)
+#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
+#define SPI_PUSHR_EOQ (1 << 27)
+#define SPI_PUSHR_CTCNT (1 << 26)
+#define SPI_PUSHR_PCS(x)	(((1 << (x)) & 0x0000003f) << 16)
+#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_PUSHR_SLAVE 0x34
+
+#define SPI_POPR 0x38
+#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_TXFR0 0x3c
+#define SPI_TXFR1 0x40
+#define SPI_TXFR2 0x44
+#define SPI_TXFR3 0x48
+#define SPI_RXFR0 0x7c
+#define SPI_RXFR1 0x80
+#define SPI_RXFR2 0x84
+#define SPI_RXFR3 0x88
+
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
+
+#define SPI_CS_INIT 0x01
+#define SPI_CS_ASSERT 0x02
+#define SPI_CS_DROP 0x04
+
+struct chip_data {
+ u32 mcr_val;
+ u32 ctar_val;
+ u16 void_write_data;
+};
+
+struct fsl_dspi {
+ struct spi_bitbang bitbang;
+ struct platform_device *pdev;
+
+ struct regmap *regmap;
+ int irq;
+ struct clk *clk;
+
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ char dataflags;
+ u8 cs;
+ u16 void_write_data;
+
+ wait_queue_head_t waitq;
+ u32 waitflags;
+};
+
+static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+{
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
+
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+}
+
+static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
+ unsigned long clkrate)
+{
+ /* Valid baud rate pre-scaler values */
+ int pbr_tbl[4] = {2, 3, 5, 7};
+ int brs[16] = { 2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768 };
+ int temp, i = 0, j = 0;
+
+ temp = clkrate / 2 / speed_hz;
+
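+	/*
+	 * Scan the prescaler and scaler tables for the first pair whose
+	 * product covers the requested divide ratio; if none does, warn
+	 * and fall back to the largest dividers below.
+	 */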
+ for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++)
+ for (j = 0; j < ARRAY_SIZE(brs); j++) {
+ if (pbr_tbl[i] * brs[j] >= temp) {
+ *pbr = i;
+ *br = j;
+ return;
+ }
+ }
+
+	pr_warn("Cannot find a valid baud rate (speed_hz %d, clkrate %ld), using the max prescaler value\n",
+		speed_hz, clkrate);
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+}
+
+static int dspi_transfer_write(struct fsl_dspi *dspi)
+{
+ int tx_count = 0;
+ int tx_word;
+ u16 d16;
+ u8 d8;
+ u32 dspi_pushr = 0;
+ int first = 1;
+
+ tx_word = is_double_byte_mode(dspi);
+
+	/*
+	 * If we are in word mode, but only have a single byte to transfer
+	 * then switch to byte mode temporarily.  Will switch back at the
+	 * end of the transfer.
+	 */
+ if (tx_word && (dspi->len == 1)) {
+ dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
+ tx_word = 0;
+ }
+
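+	/*
+	 * Fill the TX FIFO (up to DSPI_FIFO_SIZE entries). Each PUSHR word
+	 * carries the chip select and CTAR index along with the data; CTCNT
+	 * resets the transfer counter on the first word and EOQ marks the
+	 * last queued word so the controller raises EOQF when it completes.
+	 */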
+ while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
+ if (tx_word) {
+ if (dspi->len == 1)
+ break;
+
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+ d16 = *(u16 *)dspi->tx;
+ dspi->tx += 2;
+ } else {
+ d16 = dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d16) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len -= 2;
+ } else {
+			if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+				d8 = *(u8 *)dspi->tx;
+ dspi->tx++;
+ } else {
+ d8 = (u8)dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d8) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len--;
+ }
+
+		if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
+			/* last word of the transfer, or the TX FIFO is full */
+			dspi_pushr |= SPI_PUSHR_EOQ;
+		} else if (tx_word && (dspi->len == 1)) {
+			/* trailing odd byte goes out separately in byte mode */
+			dspi_pushr |= SPI_PUSHR_EOQ;
+		}
+
+ if (first) {
+ first = 0;
+ dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
+ }
+
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
+ tx_count++;
+ }
+
+ return tx_count * (tx_word + 1);
+}
+
+static int dspi_transfer_read(struct fsl_dspi *dspi)
+{
+ int rx_count = 0;
+ int rx_word = is_double_byte_mode(dspi);
+	u16 d;
+
+	while ((dspi->rx < dspi->rx_end)
+		&& (rx_count < DSPI_FIFO_SIZE)) {
+ if (rx_word) {
+ unsigned int val;
+
+ if ((dspi->rx_end - dspi->rx) == 1)
+ break;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u16 *)dspi->rx = d;
+ dspi->rx += 2;
+
+ } else {
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u8 *)dspi->rx = d;
+ dspi->rx++;
+ }
+ rx_count++;
+ }
+
+ return rx_count;
+}
+
+static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+
+	dspi->cur_transfer = t;
+ dspi->cur_chip = spi_get_ctldata(spi);
+ dspi->cs = spi->chip_select;
+ dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+ dspi->dataflags = 0;
+ dspi->tx = (void *)t->tx_buf;
+ dspi->tx_end = dspi->tx + t->len;
+ dspi->rx = t->rx_buf;
+ dspi->rx_end = dspi->rx + t->len;
+ dspi->len = t->len;
+
+ if (!dspi->rx)
+ dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+ if (!dspi->tx)
+ dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+
+ if (t->speed_hz)
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
+
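+	/*
+	 * Prime the TX FIFO, then sleep until the EOQF interrupt handler has
+	 * drained the RX FIFO and either refilled the TX FIFO or, once len
+	 * reaches zero, woken us up.
+	 */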
+ dspi_transfer_write(dspi);
+
+ if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+		dev_err(&dspi->pdev->dev, "wait for transfer completion was interrupted\n");
+ dspi->waitflags = 0;
+
+ return t->len - dspi->len;
+}
+
+static void dspi_chipselect(struct spi_device *spi, int value)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ unsigned int pushr;
+
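+	/*
+	 * The spi_bitbang core drives this callback: CONT in PUSHR keeps the
+	 * chip select asserted between queued words while the device is
+	 * active, and clearing it lets PCS deassert again.
+	 */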
+ regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
+
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ pushr |= SPI_PUSHR_CONT;
+ break;
+ case BITBANG_CS_INACTIVE:
+ pushr &= ~SPI_PUSHR_CONT;
+ break;
+ }
+
+ regmap_write(dspi->regmap, SPI_PUSHR, pushr);
+}
+
+static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct chip_data *chip;
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ unsigned char br = 0, pbr = 0, fmsz = 0;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ }
+
+ chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
+ if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+ fmsz = spi->bits_per_word - 1;
+ } else {
+ pr_err("Invalid wordsize\n");
+ return -ENODEV;
+ }
+
+ chip->void_write_data = 0;
+
+ hz_to_spi_baud(&pbr, &br,
+ spi->max_speed_hz, clk_get_rate(dspi->clk));
+
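+	/* Cache the CTAR value; it is written to the hardware per transfer. */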
+ chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
+ | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
+ | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
+ | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PBR(pbr)
+ | SPI_CTAR_BR(br);
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static int dspi_setup(struct spi_device *spi)
+{
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ return dspi_setup_transfer(spi, NULL);
+}
+
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+ struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+
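+	/* Acknowledge the end-of-queue event and drain the RX FIFO. */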
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
+
+ dspi_transfer_read(dspi);
+
+	if (!dspi->len) {
+		if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
+			regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+				SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+
+		dspi->waitflags = 1;
+		wake_up_interruptible(&dspi->waitq);
+	} else {
+		dspi_transfer_write(dspi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id fsl_dspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-dspi", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int dspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(dspi->clk);
+
+ return 0;
+}
+
+static int dspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ clk_prepare_enable(dspi->clk);
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
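+/* 32-bit registers at a 4-byte stride; SPI_RXFR3 (0x88) is the last one. */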
+static struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
+};
+
+static int dspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct fsl_dspi *dspi;
+ struct resource *res;
+ void __iomem *base;
+ int ret = 0, cs_num, bus_num;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
+ if (!master)
+ return -ENOMEM;
+
+ dspi = spi_master_get_devdata(master);
+ dspi->pdev = pdev;
+ dspi->bitbang.master = master;
+ dspi->bitbang.chipselect = dspi_chipselect;
+ dspi->bitbang.setup_transfer = dspi_setup_transfer;
+ dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
+ dspi->bitbang.master->setup = dspi_setup;
+ dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
+ SPI_BPW_MASK(16);
+
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_master_put;
+ }
+
+ dspi_regmap_config.lock_arg = dspi;
+ dspi_regmap_config.val_format_endian =
+ of_property_read_bool(np, "big-endian")
+ ? REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT;
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ &dspi_regmap_config);
+	if (IS_ERR(dspi->regmap)) {
+		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+				PTR_ERR(dspi->regmap));
+		ret = PTR_ERR(dspi->regmap);
+		goto out_master_put;
+	}
+
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq < 0) {
+ dev_err(&pdev->dev, "can't get platform irq\n");
+ ret = dspi->irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+ pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ goto out_master_put;
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_master_put;
+ }
+ clk_prepare_enable(dspi->clk);
+
+ init_waitqueue_head(&dspi->waitq);
+ platform_set_drvdata(pdev, master);
+
+ ret = spi_bitbang_start(&dspi->bitbang);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Problem registering DSPI master\n");
+ goto out_clk_put;
+ }
+
+	pr_info("Freescale DSPI master initialized\n");
+ return ret;
+
+out_clk_put:
+ clk_disable_unprepare(dspi->clk);
+out_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int dspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /* Disconnect from the SPI framework */
+ spi_bitbang_stop(&dspi->bitbang);
+ clk_disable_unprepare(dspi->clk);
+ spi_master_put(dspi->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver fsl_dspi_driver = {
+	.driver = {
+		.name		= DRIVER_NAME,
+		.of_match_table	= fsl_dspi_dt_ids,
+		.owner		= THIS_MODULE,
+		.pm		= &dspi_pm,
+	},
+	.probe		= dspi_probe,
+	.remove		= dspi_remove,
+};
+module_platform_driver(fsl_dspi_driver);
+
+MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 27bdc47b525..8ebd724e4c5 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -16,6 +16,8 @@
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/err.h>
@@ -144,10 +146,6 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
- /* Make sure its a bit width we support [4..16] */
- if ((bits_per_word < 4) || (bits_per_word > 16))
- return -EINVAL;
-
if (!hz)
hz = spi->max_speed_hz;
@@ -157,12 +155,10 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
if (bits_per_word <= 8) {
cs->rx_shift = 8 - bits_per_word;
- } else if (bits_per_word <= 16) {
+ } else {
cs->rx_shift = 16 - bits_per_word;
if (spi->mode & SPI_LSB_FIRST)
cs->get_tx = fsl_espi_tx_buf_lsb;
- } else {
- return -EINVAL;
}
mpc8xxx_spi->rx_shift = cs->rx_shift;
@@ -223,20 +219,15 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
- u8 bits_per_word;
int ret;
- bits_per_word = spi->bits_per_word;
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
- INIT_COMPLETION(mpc8xxx_spi->done);
+ reinit_completion(&mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
@@ -295,8 +286,8 @@ static void fsl_espi_do_trans(struct spi_message *m,
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
espi_trans->status = -EINVAL;
- dev_err(mspi->dev, "bits_per_word/speed_hz should be"
- " same for the same SPI transfer\n");
+ dev_err(mspi->dev,
+			"bits_per_word/speed_hz must be the same for all transfers in a message\n");
return;
}
@@ -357,7 +348,7 @@ static void fsl_espi_cmd_trans(struct spi_message *m,
}
espi_trans->tx_buf = local_buf;
- espi_trans->rx_buf = local_buf + espi_trans->n_tx;
+ espi_trans->rx_buf = local_buf;
fsl_espi_do_trans(m, espi_trans);
espi_trans->actual_length = espi_trans->len;
@@ -406,7 +397,7 @@ static void fsl_espi_rw_trans(struct spi_message *m,
espi_trans->n_rx = trans_len;
espi_trans->len = trans_len + n_tx;
espi_trans->tx_buf = local_buf;
- espi_trans->rx_buf = local_buf + n_tx;
+ espi_trans->rx_buf = local_buf;
fsl_espi_do_trans(m, espi_trans);
memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
@@ -450,7 +441,8 @@ static void fsl_espi_do_one_msg(struct spi_message *m)
m->actual_length = espi_trans.actual_length;
m->status = espi_trans.status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
}
static int fsl_espi_setup(struct spi_device *spi)
@@ -466,7 +458,7 @@ static int fsl_espi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -587,15 +579,17 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
iounmap(mspi->reg_base);
}
-static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
+static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_espi_reg *reg_base;
- u32 regval;
- int i, ret = 0;
+ struct device_node *nc;
+ const __be32 *prop;
+ u32 regval, csmode;
+ int i, len, ret = 0;
master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
if (!master) {
@@ -609,6 +603,7 @@ static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
if (ret)
goto err_probe;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->setup = fsl_espi_setup;
mpc8xxx_spi = spi_master_get_devdata(master);
@@ -641,8 +636,32 @@ static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
/* Init eSPI CS mode register */
- for (i = 0; i < pdata->max_chipselect; i++)
- mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ /* get chip select */
+ prop = of_get_property(nc, "reg", &len);
+ if (!prop || len < sizeof(*prop))
+ continue;
+ i = be32_to_cpup(prop);
+ if (i < 0 || i >= pdata->max_chipselect)
+ continue;
+
+ csmode = CSMODE_INIT_VAL;
+ /* check if CSBEF is set in device tree */
+ prop = of_get_property(nc, "fsl,csbef", &len);
+ if (prop && len >= sizeof(*prop)) {
+ csmode &= ~(CSMODE_BEF(0xf));
+ csmode |= CSMODE_BEF(be32_to_cpup(prop));
+ }
+ /* check if CSAFT is set in device tree */
+ prop = of_get_property(nc, "fsl,csaft", &len);
+ if (prop && len >= sizeof(*prop)) {
+ csmode &= ~(CSMODE_AFT(0xf));
+ csmode |= CSMODE_AFT(be32_to_cpup(prop));
+ }
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
+
+ dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
+ }
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
@@ -670,7 +689,7 @@ err:
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
const u32 *prop;
int len;
@@ -686,13 +705,13 @@ static int of_fsl_espi_get_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
+static int of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- struct resource irq;
+ unsigned int irq;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
@@ -707,13 +726,13 @@ static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
if (ret)
goto err;
- ret = of_irq_to_resource(np, 0, &irq);
- if (!ret) {
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
ret = -EINVAL;
goto err;
}
- master = fsl_espi_probe(dev, &mem, irq.start);
+ master = fsl_espi_probe(dev, &mem, irq);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
@@ -725,11 +744,71 @@ err:
return ret;
}
-static int __devexit of_fsl_espi_remove(struct platform_device *dev)
+static int of_fsl_espi_remove(struct platform_device *dev)
{
return mpc8xxx_spi_remove(&dev->dev);
}
+#ifdef CONFIG_PM_SLEEP
+static int of_fsl_espi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int ret;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval &= ~SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static int of_fsl_espi_resume(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int i;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for (i = 0; i < pdata->max_chipselect; i++)
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops espi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
+};
+
static const struct of_device_id of_fsl_espi_match[] = {
{ .compatible = "fsl,mpc8536-espi" },
{}
@@ -741,9 +820,10 @@ static struct platform_driver fsl_espi_driver = {
.name = "fsl_espi",
.owner = THIS_MODULE,
.of_match_table = of_fsl_espi_match,
+ .pm = &espi_pm,
},
.probe = of_fsl_espi_probe,
- .remove = __devexit_p(of_fsl_espi_remove),
+ .remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 1503574b215..95212ea96c8 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -23,7 +23,9 @@
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/spi/spi.h>
+#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
+#endif
#include "spi-fsl-lib.h"
@@ -59,7 +61,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
-void mpc8xxx_spi_work(struct work_struct *work)
+static void mpc8xxx_spi_work(struct work_struct *work)
{
struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
work);
@@ -97,11 +99,6 @@ int mpc8xxx_spi_transfer(struct spi_device *spi,
return 0;
}
-void mpc8xxx_spi_cleanup(struct spi_device *spi)
-{
- kfree(spi->controller_state);
-}
-
const char *mpc8xxx_spi_strmode(unsigned int flags)
{
if (flags & SPI_QE_CPU_MODE) {
@@ -120,7 +117,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
int ret = 0;
@@ -132,7 +129,6 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
| SPI_LSB_FIRST | SPI_LOOP;
master->transfer = mpc8xxx_spi_transfer;
- master->cleanup = mpc8xxx_spi_cleanup;
master->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_master_get_devdata(master);
@@ -169,7 +165,7 @@ err:
return ret;
}
-int __devexit mpc8xxx_spi_remove(struct device *dev)
+int mpc8xxx_spi_remove(struct device *dev)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct spi_master *master;
@@ -189,7 +185,7 @@ int __devexit mpc8xxx_spi_remove(struct device *dev)
return 0;
}
-int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
+int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -198,7 +194,7 @@ int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
const void *prop;
int ret = -ENOMEM;
- pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
@@ -208,15 +204,19 @@ int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
/* Allocate bus num dynamically. */
pdata->bus_num = -1;
+#ifdef CONFIG_FSL_SOC
/* SPI controller is either clocked from QE or SoC clock. */
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
- if (pdata->sysclk == -1) {
- ret = -ENODEV;
- goto err;
- }
+ if (pdata->sysclk == -1)
+ return -ENODEV;
}
+#else
+ ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
+ if (ret)
+ return ret;
+#endif
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
@@ -229,8 +229,4 @@ int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
-
-err:
- kfree(pinfo);
- return ret;
}
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index cbe881b9ea7..2fcbfd01d10 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -34,8 +34,10 @@ struct mpc8xxx_spi {
int subblock;
struct spi_pram __iomem *pram;
+#ifdef CONFIG_FSL_SOC
struct cpm_buf_desc __iomem *tx_bd;
struct cpm_buf_desc __iomem *rx_bd;
+#endif
struct spi_transfer *xfer_in_progress;
@@ -67,6 +69,15 @@ struct mpc8xxx_spi {
unsigned int flags;
+#ifdef CONFIG_SPI_FSL_SPI
+ int type;
+ int native_chipselects;
+ u8 max_bits_per_word;
+
+ void (*set_shifts)(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first);
+#endif
+
struct workqueue_struct *workqueue;
struct work_struct work;
@@ -87,12 +98,12 @@ struct spi_mpc8xxx_cs {
static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
- out_be32(reg, val);
+ iowrite32be(val, reg);
}
static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
- return in_be32(reg);
+ return ioread32be(reg);
}
struct mpc8xxx_spi_probe_info {
@@ -113,7 +124,6 @@ extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len);
extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
-extern void mpc8xxx_spi_cleanup(struct spi_device *spi);
extern const char *mpc8xxx_spi_strmode(unsigned int flags);
extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 6a62934ca74..98ccd231bf0 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -10,6 +10,10 @@
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -30,75 +34,54 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <sysdev/fsl_soc.h>
-#include <asm/cpm.h>
-#include <asm/qe.h>
-
#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
-/* CPM1 and CPM2 are mutually exclusive. */
-#ifdef CONFIG_CPM1
-#include <asm/cpm1.h>
-#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
-#else
-#include <asm/cpm2.h>
-#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
-#endif
-
-/* SPI Controller registers */
-struct fsl_spi_reg {
- u8 res1[0x20];
- __be32 mode;
- __be32 event;
- __be32 mask;
- __be32 command;
- __be32 transmit;
- __be32 receive;
-};
-
-/* SPI Controller mode register definitions */
-#define SPMODE_LOOP (1 << 30)
-#define SPMODE_CI_INACTIVEHIGH (1 << 29)
-#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
-#define SPMODE_DIV16 (1 << 27)
-#define SPMODE_REV (1 << 26)
-#define SPMODE_MS (1 << 25)
-#define SPMODE_ENABLE (1 << 24)
-#define SPMODE_LEN(x) ((x) << 20)
-#define SPMODE_PM(x) ((x) << 16)
-#define SPMODE_OP (1 << 14)
-#define SPMODE_CG(x) ((x) << 7)
+#define TYPE_FSL 0
+#define TYPE_GRLIB 1
-/*
- * Default for SPI Mode:
- * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk
- */
-#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
- SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
-
-/* SPIE register values */
-#define SPIE_NE 0x00000200 /* Not empty */
-#define SPIE_NF 0x00000100 /* Not full */
+struct fsl_spi_match_data {
+ int type;
+};
-/* SPIM register values */
-#define SPIM_NE 0x00000200 /* Not empty */
-#define SPIM_NF 0x00000100 /* Not full */
+static struct fsl_spi_match_data of_fsl_spi_fsl_config = {
+ .type = TYPE_FSL,
+};
-#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
-#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+static struct fsl_spi_match_data of_fsl_spi_grlib_config = {
+ .type = TYPE_GRLIB,
+};
-/* SPCOM register values */
-#define SPCOM_STR (1 << 23) /* Start transmit */
+static struct of_device_id of_fsl_spi_match[] = {
+ {
+ .compatible = "fsl,spi",
+ .data = &of_fsl_spi_fsl_config,
+ },
+ {
+ .compatible = "aeroflexgaisler,spictrl",
+ .data = &of_fsl_spi_grlib_config,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
-#define SPI_PRAM_SIZE 0x100
-#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+static int fsl_spi_get_type(struct device *dev)
+{
+ const struct of_device_id *match;
-static void *fsl_dummy_rx;
-static DEFINE_MUTEX(fsl_dummy_rx_lock);
-static int fsl_dummy_rx_refcnt;
+ if (dev->of_node) {
+ match = of_match_node(of_fsl_spi_match, dev->of_node);
+ if (match && match->data)
+ return ((struct fsl_spi_match_data *)match->data)->type;
+ }
+ return TYPE_FSL;
+}
static void fsl_spi_change_mode(struct spi_device *spi)
{
@@ -119,18 +102,7 @@ static void fsl_spi_change_mode(struct spi_device *spi)
/* When in CPM mode, we need to reinit tx and rx. */
if (mspi->flags & SPI_CPM_MODE) {
- if (mspi->flags & SPI_QE) {
- qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, 0);
- } else {
- cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
- if (mspi->flags & SPI_CPM1) {
- out_be16(&mspi->pram->rbptr,
- in_be16(&mspi->pram->rbase));
- out_be16(&mspi->pram->tbptr,
- in_be16(&mspi->pram->tbase));
- }
- }
+ fsl_spi_cpm_reinit_txrx(mspi);
}
mpc8xxx_spi_write_reg(mode, cs->hw_mode);
local_irq_restore(flags);
@@ -163,6 +135,40 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
}
}
+static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (msb_first) {
+ if (bits_per_word <= 8) {
+ *rx_shift = 16;
+ *tx_shift = 24;
+ } else if (bits_per_word <= 16) {
+ *rx_shift = 16;
+ *tx_shift = 16;
+ }
+ } else {
+ if (bits_per_word <= 8)
+ *rx_shift = 8;
+ }
+}
+
+static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (bits_per_word <= 16) {
+ if (msb_first) {
+ *rx_shift = 16; /* LSB in bit 16 */
+ *tx_shift = 32 - bits_per_word; /* MSB in bit 31 */
+ } else {
+ *rx_shift = 16 - bits_per_word; /* MSB in bit 15 */
+ }
+ }
+}
+
static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
struct mpc8xxx_spi *mpc8xxx_spi,
@@ -173,31 +179,20 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
if (bits_per_word <= 8) {
cs->get_rx = mpc8xxx_spi_rx_buf_u8;
cs->get_tx = mpc8xxx_spi_tx_buf_u8;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- cs->rx_shift = 16;
- cs->tx_shift = 24;
- }
} else if (bits_per_word <= 16) {
cs->get_rx = mpc8xxx_spi_rx_buf_u16;
cs->get_tx = mpc8xxx_spi_tx_buf_u16;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- cs->rx_shift = 16;
- cs->tx_shift = 16;
- }
} else if (bits_per_word <= 32) {
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
} else
return -EINVAL;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
- spi->mode & SPI_LSB_FIRST) {
- cs->tx_shift = 0;
- if (bits_per_word <= 8)
- cs->rx_shift = 8;
- else
- cs->rx_shift = 0;
- }
+ if (mpc8xxx_spi->set_shifts)
+ mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift,
+ bits_per_word,
+ !(spi->mode & SPI_LSB_FIRST));
+
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
@@ -244,11 +239,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
- /* Make sure its a bit width we support [4..16, 32] */
- if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32)))
- return -EINVAL;
-
if (!hz)
hz = spi->max_speed_hz;
@@ -295,112 +285,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
return 0;
}
-static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
-{
- struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
- struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
- unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
- unsigned int xfer_ofs;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
-
- xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
-
- if (mspi->rx_dma == mspi->dma_dummy_rx)
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
- else
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
- out_be16(&rx_bd->cbd_datlen, 0);
- out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
-
- if (mspi->tx_dma == mspi->dma_dummy_tx)
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
- else
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
- out_be16(&tx_bd->cbd_datlen, xfer_len);
- out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
- BD_SC_LAST);
-
- /* start transfer */
- mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
-}
-
-static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
- struct spi_transfer *t, bool is_dma_mapped)
-{
- struct device *dev = mspi->dev;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
-
- if (is_dma_mapped) {
- mspi->map_tx_dma = 0;
- mspi->map_rx_dma = 0;
- } else {
- mspi->map_tx_dma = 1;
- mspi->map_rx_dma = 1;
- }
-
- if (!t->tx_buf) {
- mspi->tx_dma = mspi->dma_dummy_tx;
- mspi->map_tx_dma = 0;
- }
-
- if (!t->rx_buf) {
- mspi->rx_dma = mspi->dma_dummy_rx;
- mspi->map_rx_dma = 0;
- }
-
- if (mspi->map_tx_dma) {
- void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
-
- mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, mspi->tx_dma)) {
- dev_err(dev, "unable to map tx dma\n");
- return -ENOMEM;
- }
- } else if (t->tx_buf) {
- mspi->tx_dma = t->tx_dma;
- }
-
- if (mspi->map_rx_dma) {
- mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, mspi->rx_dma)) {
- dev_err(dev, "unable to map rx dma\n");
- goto err_rx_dma;
- }
- } else if (t->rx_buf) {
- mspi->rx_dma = t->rx_dma;
- }
-
- /* enable rx ints */
- mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
-
- mspi->xfer_in_progress = t;
- mspi->count = t->len;
-
- /* start CPM transfers */
- fsl_spi_cpm_bufs_start(mspi);
-
- return 0;
-
-err_rx_dma:
- if (mspi->map_tx_dma)
- dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
- return -ENOMEM;
-}
-
-static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
-{
- struct device *dev = mspi->dev;
- struct spi_transfer *t = mspi->xfer_in_progress;
-
- if (mspi->map_tx_dma)
- dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
- if (mspi->map_rx_dma)
- dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
- mspi->xfer_in_progress = NULL;
-}
-
static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
@@ -449,7 +333,7 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
- INIT_COMPLETION(mpc8xxx_spi->done);
+ reinit_completion(&mpc8xxx_spi->done);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
@@ -472,18 +356,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
- struct spi_transfer *t;
+ struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
- cs_change = 1;
- status = 0;
+ /* Don't allow changes if CS is active */
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- /* Don't allow changes if CS is active */
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
status = -EINVAL;
+ dev_err(&spi->dev,
+				"bits_per_word/speed_hz must be the same for all transfers in a message\n");
+ return;
+ }
+ }
+ cs_change = 1;
+ status = -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
@@ -514,7 +408,8 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
}
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
if (status || !cs_change) {
ndelay(nsecs);
@@ -536,7 +431,7 @@ static int fsl_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -565,31 +460,45 @@ static int fsl_spi_setup(struct spi_device *spi)
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
}
- return 0;
-}
-static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
-{
- u16 len;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ if (mpc8xxx_spi->type == TYPE_GRLIB) {
+ if (gpio_is_valid(spi->cs_gpio)) {
+ int desel;
- dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
- in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+ retval = gpio_request(spi->cs_gpio,
+ dev_name(&spi->dev));
+ if (retval)
+ return retval;
- len = in_be16(&mspi->rx_bd->cbd_datlen);
- if (len > mspi->count) {
- WARN_ON(1);
- len = mspi->count;
+ desel = !(spi->mode & SPI_CS_HIGH);
+ retval = gpio_direction_output(spi->cs_gpio, desel);
+ if (retval) {
+ gpio_free(spi->cs_gpio);
+ return retval;
+ }
+ } else if (spi->cs_gpio != -ENOENT) {
+ if (spi->cs_gpio < 0)
+ return spi->cs_gpio;
+ return -EINVAL;
+ }
+		/*
+		 * When spi->cs_gpio == -ENOENT, a hole in the phandle list
+		 * indicates to use native chipselect if present, or allow for
+		 * an always selected chip.
+		 */
}
- /* Clear the events */
- mpc8xxx_spi_write_reg(&reg_base->event, events);
+ /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
+ fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- mspi->count -= len;
- if (mspi->count)
- fsl_spi_cpm_bufs_start(mspi);
- else
- complete(&mspi->done);
+ return 0;
+}
+
+static void fsl_spi_cleanup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
}
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
@@ -646,207 +555,57 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
return ret;
}
-static void *fsl_spi_alloc_dummy_rx(void)
-{
- mutex_lock(&fsl_dummy_rx_lock);
-
- if (!fsl_dummy_rx)
- fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
- if (fsl_dummy_rx)
- fsl_dummy_rx_refcnt++;
-
- mutex_unlock(&fsl_dummy_rx_lock);
-
- return fsl_dummy_rx;
-}
-
-static void fsl_spi_free_dummy_rx(void)
+static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
{
- mutex_lock(&fsl_dummy_rx_lock);
-
- switch (fsl_dummy_rx_refcnt) {
- case 0:
- WARN_ON(1);
- break;
- case 1:
- kfree(fsl_dummy_rx);
- fsl_dummy_rx = NULL;
- /* fall through */
- default:
- fsl_dummy_rx_refcnt--;
- break;
- }
-
- mutex_unlock(&fsl_dummy_rx_lock);
+ iounmap(mspi->reg_base);
+ fsl_spi_cpm_free(mspi);
}
-static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = mspi->dev;
- struct device_node *np = dev->of_node;
- const u32 *iprop;
- int size;
- void __iomem *spi_base;
- unsigned long pram_ofs = -ENOMEM;
-
- /* Can't use of_address_to_resource(), QE muram isn't at 0. */
- iprop = of_get_property(np, "reg", &size);
-
- /* QE with a fixed pram location? */
- if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
- return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
-
- /* QE but with a dynamic pram location? */
- if (mspi->flags & SPI_QE) {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
- return pram_ofs;
- }
-
- spi_base = of_iomap(np, 1);
- if (spi_base == NULL)
- return -EINVAL;
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ u32 slvsel;
+ u16 cs = spi->chip_select;
- if (mspi->flags & SPI_CPM2) {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- } else {
- struct spi_pram __iomem *pram = spi_base;
- u16 rpbase = in_be16(&pram->rpbase);
-
- /* Microcode relocation patch applied? */
- if (rpbase)
- pram_ofs = rpbase;
- else {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- }
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_set_value(spi->cs_gpio, on);
+ } else if (cs < mpc8xxx_spi->native_chipselects) {
+ slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
+ slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
}
-
- iounmap(spi_base);
- return pram_ofs;
}
-static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+static void fsl_spi_grlib_probe(struct device *dev)
{
- struct device *dev = mspi->dev;
- struct device_node *np = dev->of_node;
- const u32 *iprop;
- int size;
- unsigned long pram_ofs;
- unsigned long bds_ofs;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ int mbits;
+ u32 capabilities;
- if (!(mspi->flags & SPI_CPM_MODE))
- return 0;
+ capabilities = mpc8xxx_spi_read_reg(&reg_base->cap);
- if (!fsl_spi_alloc_dummy_rx())
- return -ENOMEM;
+ mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts;
+ mbits = SPCAP_MAXWLEN(capabilities);
+ if (mbits)
+ mpc8xxx_spi->max_bits_per_word = mbits + 1;
- if (mspi->flags & SPI_QE) {
- iprop = of_get_property(np, "cell-index", &size);
- if (iprop && size == sizeof(*iprop))
- mspi->subblock = *iprop;
-
- switch (mspi->subblock) {
- default:
- dev_warn(dev, "cell-index unspecified, assuming SPI1");
- /* fall through */
- case 0:
- mspi->subblock = QE_CR_SUBBLOCK_SPI1;
- break;
- case 1:
- mspi->subblock = QE_CR_SUBBLOCK_SPI2;
- break;
- }
- }
-
- pram_ofs = fsl_spi_cpm_get_pram(mspi);
- if (IS_ERR_VALUE(pram_ofs)) {
- dev_err(dev, "can't allocate spi parameter ram\n");
- goto err_pram;
+ mpc8xxx_spi->native_chipselects = 0;
+ if (SPCAP_SSEN(capabilities)) {
+ mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
-
- bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
- sizeof(*mspi->rx_bd), 8);
- if (IS_ERR_VALUE(bds_ofs)) {
- dev_err(dev, "can't allocate bds\n");
- goto err_bds;
- }
-
- mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
- dev_err(dev, "unable to map dummy tx buffer\n");
- goto err_dummy_tx;
- }
-
- mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
- dev_err(dev, "unable to map dummy rx buffer\n");
- goto err_dummy_rx;
- }
-
- mspi->pram = cpm_muram_addr(pram_ofs);
-
- mspi->tx_bd = cpm_muram_addr(bds_ofs);
- mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
-
- /* Initialize parameter ram. */
- out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
- out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
- out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
- out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
- out_be16(&mspi->pram->mrblr, SPI_MRBLR);
- out_be32(&mspi->pram->rstate, 0);
- out_be32(&mspi->pram->rdp, 0);
- out_be16(&mspi->pram->rbptr, 0);
- out_be16(&mspi->pram->rbc, 0);
- out_be32(&mspi->pram->rxtmp, 0);
- out_be32(&mspi->pram->tstate, 0);
- out_be32(&mspi->pram->tdp, 0);
- out_be16(&mspi->pram->tbptr, 0);
- out_be16(&mspi->pram->tbc, 0);
- out_be32(&mspi->pram->txtmp, 0);
-
- return 0;
-
-err_dummy_rx:
- dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
-err_dummy_tx:
- cpm_muram_free(bds_ofs);
-err_bds:
- cpm_muram_free(pram_ofs);
-err_pram:
- fsl_spi_free_dummy_rx();
- return -ENOMEM;
+ master->num_chipselect = mpc8xxx_spi->native_chipselects;
+ pdata->cs_control = fsl_spi_grlib_cs_control;
}
-static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
-{
- struct device *dev = mspi->dev;
-
- if (!(mspi->flags & SPI_CPM_MODE))
- return;
-
- dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
- dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
- cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
- cpm_muram_free(cpm_muram_offset(mspi->pram));
- fsl_spi_free_dummy_rx();
-}
-
-static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
-{
- iounmap(mspi->reg_base);
- fsl_spi_cpm_free(mspi);
-}
-
-static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
+static struct spi_master * fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
@@ -866,27 +625,39 @@ static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
goto err_probe;
master->setup = fsl_spi_setup;
+ master->cleanup = fsl_spi_cleanup;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_spi_remove;
-
+ mpc8xxx_spi->max_bits_per_word = 32;
+ mpc8xxx_spi->type = fsl_spi_get_type(dev);
ret = fsl_spi_cpm_init(mpc8xxx_spi);
if (ret)
goto err_cpm_init;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- mpc8xxx_spi->rx_shift = 16;
- mpc8xxx_spi->tx_shift = 24;
- }
-
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->reg_base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
}
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
+
+ if (mpc8xxx_spi->set_shifts)
+ /* 8 bits per word and MSB first */
+ mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift,
+ &mpc8xxx_spi->tx_shift, 8, 1);
+
/* Register for SPI Interrupt */
ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
0, "fsl_spi", mpc8xxx_spi);
@@ -904,6 +675,10 @@ static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (mpc8xxx_spi->max_bits_per_word < 8) {
+ regval &= ~SPMODE_LEN(0xF);
+ regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1);
+ }
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
@@ -934,7 +709,8 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
struct device *dev = spi->dev.parent->parent;
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
bool alow = pinfo->alow_flags[cs];
@@ -945,14 +721,14 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
static int of_fsl_spi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
- unsigned int ngpios;
+ int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
- if (!ngpios) {
+ if (ngpios <= 0) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
@@ -1024,7 +800,7 @@ err_alloc_flags:
static int of_fsl_spi_free_chipselects(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
int i;
@@ -1041,34 +817,37 @@ static int of_fsl_spi_free_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
+static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- struct resource irq;
+ int irq, type;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
- ret = of_fsl_spi_get_chipselects(dev);
- if (ret)
- goto err;
+ type = fsl_spi_get_type(&ofdev->dev);
+ if (type == TYPE_FSL) {
+ ret = of_fsl_spi_get_chipselects(dev);
+ if (ret)
+ goto err;
+ }
ret = of_address_to_resource(np, 0, &mem);
if (ret)
goto err;
- ret = of_irq_to_resource(np, 0, &irq);
- if (!ret) {
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
ret = -EINVAL;
goto err;
}
- master = fsl_spi_probe(dev, &mem, irq.start);
+ master = fsl_spi_probe(dev, &mem, irq);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
@@ -1077,27 +856,25 @@ static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
return 0;
err:
- of_fsl_spi_free_chipselects(dev);
+ if (type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(dev);
return ret;
}
-static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
+static int of_fsl_spi_remove(struct platform_device *ofdev)
{
+ struct spi_master *master = platform_get_drvdata(ofdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
int ret;
ret = mpc8xxx_spi_remove(&ofdev->dev);
if (ret)
return ret;
- of_fsl_spi_free_chipselects(&ofdev->dev);
+ if (mpc8xxx_spi->type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
}
-static const struct of_device_id of_fsl_spi_match[] = {
- { .compatible = "fsl,spi" },
- {}
-};
-MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
-
static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
@@ -1105,7 +882,7 @@ static struct platform_driver of_fsl_spi_driver = {
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
- .remove = __devexit_p(of_fsl_spi_remove),
+ .remove = of_fsl_spi_remove,
};
#ifdef CONFIG_MPC832x_RDB
@@ -1116,13 +893,13 @@ static struct platform_driver of_fsl_spi_driver = {
* tree can work with OpenFirmware driver. But for now we support old trees
* as well.
*/
-static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
struct spi_master *master;
- if (!pdev->dev.platform_data)
+ if (!dev_get_platdata(&pdev->dev))
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1134,12 +911,10 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return -EINVAL;
master = fsl_spi_probe(&pdev->dev, mem, irq);
- if (IS_ERR(master))
- return PTR_ERR(master);
- return 0;
+ return PTR_ERR_OR_ZERO(master);
}
-static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
return mpc8xxx_spi_remove(&pdev->dev);
}
@@ -1147,7 +922,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
- .remove = __devexit_p(plat_mpc8xxx_spi_remove),
+ .remove = plat_mpc8xxx_spi_remove,
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
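The plat_mpc8xxx_spi_probe() cleanup above folds the explicit IS_ERR()/PTR_ERR() branch into a single PTR_ERR_OR_ZERO() return. For readers unfamiliar with the convention, the following stand-alone sketch (simplified re-implementations, not the kernel's <linux/err.h> helpers) shows the idea: error codes travel inside the pointer value itself, in the top MAX_ERRNO addresses, so the caller decodes either a negative errno or zero for success.

/*
 * Sketch only: err_ptr()/ptr_err_or_zero() approximate ERR_PTR() and
 * PTR_ERR_OR_ZERO(); the MAX_ERRNO cutoff mirrors the kernel convention.
 */
#include <stdio.h>

#define MAX_ERRNO	4095

static void *err_ptr(long error)
{
	return (void *)error;
}

static long ptr_err_or_zero(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	/* Addresses in the last MAX_ERRNO bytes of the space encode errors. */
	return addr >= (unsigned long)-MAX_ERRNO ? (long)addr : 0;
}

int main(void)
{
	int ok_target = 0;
	void *ok = &ok_target;
	void *bad = err_ptr(-12);	/* -ENOMEM */

	printf("ok  -> %ld\n", ptr_err_or_zero(ok));	/* prints 0 */
	printf("bad -> %ld\n", ptr_err_or_zero(bad));	/* prints -12 */
	return 0;
}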
diff --git a/drivers/spi/spi-fsl-spi.h b/drivers/spi/spi-fsl-spi.h
new file mode 100644
index 00000000000..9a6dae00e3f
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.h
@@ -0,0 +1,72 @@
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_SPI_H__
+#define __SPI_FSL_SPI_H__
+
+/* SPI Controller registers */
+struct fsl_spi_reg {
+ __be32 cap; /* TYPE_GRLIB specific */
+ u8 res1[0x1C];
+ __be32 mode;
+ __be32 event;
+ __be32 mask;
+ __be32 command;
+ __be32 transmit;
+ __be32 receive;
+ __be32 slvsel; /* TYPE_GRLIB specific */
+};
+
+/* SPI Controller mode register definitions */
+#define SPMODE_LOOP (1 << 30)
+#define SPMODE_CI_INACTIVEHIGH (1 << 29)
+#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
+#define SPMODE_DIV16 (1 << 27)
+#define SPMODE_REV (1 << 26)
+#define SPMODE_MS (1 << 25)
+#define SPMODE_ENABLE (1 << 24)
+#define SPMODE_LEN(x) ((x) << 20)
+#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
+#define SPMODE_CG(x) ((x) << 7)
+
+/* TYPE_GRLIB SPI Controller capability register definitions */
+#define SPCAP_SSEN(x) (((x) >> 16) & 0x1)
+#define SPCAP_SSSZ(x) (((x) >> 24) & 0xff)
+#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)
+
+/*
+ * Default for SPI Mode:
+ * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
+ */
+#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
+ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+
+#endif /* __SPI_FSL_SPI_H__ */
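As a quick aid to reading the register definitions above, here is a stand-alone sketch (not part of the patch) that recomposes the default mode word from the SPMODE_* fields; the macro values are copied verbatim from the header, and only the surrounding program is illustrative.

/* Sketch: compose the default SPMODE value from the fields defined above. */
#include <stdint.h>
#include <stdio.h>

#define SPMODE_CI_INACTIVEHIGH	(1 << 29)
#define SPMODE_DIV16		(1 << 27)
#define SPMODE_REV		(1 << 26)
#define SPMODE_MS		(1 << 25)
#define SPMODE_ENABLE		(1 << 24)
#define SPMODE_LEN(x)		((x) << 20)	/* LEN(7) selects 8-bit words, per the header comment */
#define SPMODE_PM(x)		((x) << 16)	/* clock prescale field */

int main(void)
{
	/* Same composition as SPMODE_INIT_VAL: 8-bit words, slow clock. */
	uint32_t init = SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV |
			SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf);

	/* A driver would OR in SPMODE_ENABLE to switch the controller on. */
	printf("init 0x%08x, enabled 0x%08x\n", init, init | SPMODE_ENABLE);
	return 0;
}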
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a2b50c516b3..9f595535cf2 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -19,9 +19,9 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -114,17 +114,17 @@ spi_to_pdata(const struct spi_device *spi)
static inline void setsck(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_SCK_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_SCK_GPIO, is_on);
}
static inline void setmosi(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_MOSI_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_MOSI_GPIO, is_on);
}
static inline int getmiso(const struct spi_device *spi)
{
- return !!gpio_get_value(SPI_MISO_GPIO);
+ return !!gpio_get_value_cansleep(SPI_MISO_GPIO);
}
#undef pdata
@@ -228,7 +228,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
if (cs != SPI_GPIO_NO_CHIPSELECT) {
/* SPI is normally active-low */
- gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ gpio_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
}
@@ -239,9 +239,6 @@ static int spi_gpio_setup(struct spi_device *spi)
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
struct device_node *np = spi->master->dev.of_node;
- if (spi->bits_per_word > 32)
- return -EINVAL;
-
if (np) {
/*
* In DT environments, the CS GPIOs have already been
@@ -252,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* ... otherwise, take it from spi->controller_data
*/
- cs = (unsigned int) spi->controller_data;
+ cs = (unsigned int)(uintptr_t) spi->controller_data;
}
if (!spi->controller_state) {
@@ -265,9 +262,9 @@ static int spi_gpio_setup(struct spi_device *spi)
}
}
if (!status) {
- status = spi_bitbang_setup(spi);
/* in case it was initialized from static board data */
spi_gpio->cs_gpios[spi->chip_select] = cs;
+ status = spi_bitbang_setup(spi);
}
if (status) {
@@ -287,7 +284,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -301,9 +298,8 @@ static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __devinit
-spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
- u16 *res_flags)
+static int spi_gpio_request(struct spi_gpio_platform_data *pdata,
+ const char *label, u16 *res_flags)
{
int value;
@@ -344,7 +340,7 @@ done:
}
#ifdef CONFIG_OF
-static struct of_device_id spi_gpio_dt_ids[] = {
+static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
};
@@ -366,9 +362,26 @@ static int spi_gpio_probe_dt(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->sck = of_get_named_gpio(np, "gpio-sck", 0);
- pdata->miso = of_get_named_gpio(np, "gpio-miso", 0);
- pdata->mosi = of_get_named_gpio(np, "gpio-mosi", 0);
+ ret = of_get_named_gpio(np, "gpio-sck", 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio-sck property not found\n");
+ goto error_free;
+ }
+ pdata->sck = ret;
+
+ ret = of_get_named_gpio(np, "gpio-miso", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-miso property not found, switching to no-rx mode\n");
+ pdata->miso = SPI_GPIO_NO_MISO;
+ } else
+ pdata->miso = ret;
+
+ ret = of_get_named_gpio(np, "gpio-mosi", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-mosi property not found, switching to no-tx mode\n");
+ pdata->mosi = SPI_GPIO_NO_MOSI;
+ } else
+ pdata->mosi = ret;
ret = of_property_read_u32(np, "num-chipselects", &tmp);
if (ret < 0) {
@@ -392,7 +405,7 @@ static inline int spi_gpio_probe_dt(struct platform_device *pdev)
}
#endif
-static int __devinit spi_gpio_probe(struct platform_device *pdev)
+static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
@@ -407,7 +420,7 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
if (status > 0)
use_of = 1;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
#ifdef GENERIC_BITBANG
if (!pdata || !pdata->num_chipselect)
return -ENODEV;
@@ -430,6 +443,7 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
if (pdata)
spi_gpio->pdata = *pdata;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = master_flags;
master->bus_num = pdev->id;
master->num_chipselect = SPI_N_CHIPSEL;
@@ -453,7 +467,7 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
}
#endif
- spi_gpio->bitbang.master = spi_master_get(master);
+ spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
@@ -472,7 +486,6 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
status = spi_bitbang_start(&spi_gpio->bitbang);
if (status < 0) {
- spi_master_put(spi_gpio->bitbang.master);
gpio_free:
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -485,28 +498,25 @@ gpio_free:
return status;
}
-static int __devexit spi_gpio_remove(struct platform_device *pdev)
+static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
- int status;
spi_gpio = platform_get_drvdata(pdev);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&spi_gpio->bitbang);
- spi_master_put(spi_gpio->bitbang.master);
-
- platform_set_drvdata(pdev, NULL);
+ spi_bitbang_stop(&spi_gpio->bitbang);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
gpio_free(SPI_MOSI_GPIO);
gpio_free(SPI_SCK_GPIO);
+ spi_master_put(spi_gpio->bitbang.master);
- return status;
+ return 0;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -518,7 +528,7 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
- .remove = __devexit_p(spi_gpio_remove),
+ .remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
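The spi-gpio conversion above drops the open-coded bits_per_word check from spi_gpio_setup() and instead advertises master->bits_per_word_mask, letting the SPI core reject unsupported word sizes. A stand-alone sketch of that mask convention follows (bit n-1 set means n-bit words are accepted); the helpers are local approximations for illustration, not the kernel's SPI_BPW_RANGE_MASK()/SPI_BPW_MASK() macros.

/* Sketch of the bits_per_word_mask convention: bit (n - 1) <=> n-bit words. */
#include <stdint.h>
#include <stdio.h>

static uint32_t bpw_range_mask(unsigned int min, unsigned int max)
{
	uint32_t hi = (max >= 32) ? 0xffffffffu : ((1u << max) - 1);
	uint32_t lo = (min <= 1) ? 0 : ((1u << (min - 1)) - 1);

	return hi & ~lo;
}

static int bpw_supported(uint32_t mask, unsigned int bits)
{
	return bits >= 1 && bits <= 32 && (mask & (1u << (bits - 1)));
}

int main(void)
{
	uint32_t mask = bpw_range_mask(1, 32);	/* what spi-gpio now advertises */

	printf("mask 0x%08x: 8-bit %d, 32-bit %d, 33-bit %d\n",
	       mask, bpw_supported(mask, 8), bpw_supported(mask, 32),
	       bpw_supported(mask, 33));
	return 0;
}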
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c9a0d8467de..5daff2054ae 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -37,7 +36,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/spi-imx.h>
@@ -207,7 +205,8 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
+ unsigned int *fres)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
@@ -235,6 +234,10 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
+
+ /* Resulting frequency for the SCLK line. */
+ *fres = (fin / (pre + 1)) >> post;
+
return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
@@ -265,6 +268,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+ u32 clk = config->speed_hz, delay;
/*
* The hardware seems to have a race condition when changing modes. The
@@ -276,7 +280,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -298,6 +302,23 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+ /*
+ * Wait until the changes in the configuration register CONFIGREG
+ * propagate into the hardware. It takes exactly one tick of the
+ * SCLK clock, but we will wait two SCLK clocks just to be sure. The
+ * effect of the delay it takes for the hardware to apply changes
+ * is noticeable if the SCLK clock runs very slowly. In such a case, if
+ * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+ * be asserted before the SCLK polarity changes, which would disrupt
+ * the SPI communication as the device on the other end would consider
+ * the change of SCLK polarity as a clock tick already.
+ */
+ delay = (2 * 1000000) / clk;
+ if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
return 0;
}
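To put numbers on the configuration settle delay added above: it is two SCLK periods expressed in microseconds, so at fast clock rates the wait collapses to a brief busy-wait (or nothing at all) while very slow clocks sleep instead. A stand-alone sketch with a few arbitrary example rates; the 10 us threshold mirrors the hunk.

/* Sketch: the two-SCLK-tick settle delay computed in the hunk above. */
#include <stdio.h>

static unsigned int settle_delay_us(unsigned int sclk_hz)
{
	return (2 * 1000000) / sclk_hz;	/* two SCLK periods, in microseconds */
}

int main(void)
{
	const unsigned int rates[] = { 10000000, 250000, 25000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int d = settle_delay_us(rates[i]);

		/* Below 10 us the driver busy-waits (udelay), otherwise it sleeps. */
		printf("%8u Hz -> %3u us -> %s\n", rates[i], d,
		       d < 10 ? "udelay" : "usleep_range");
	}
	return 0;
}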
@@ -620,6 +641,7 @@ static const struct of_device_id spi_imx_dt_ids[] = {
{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
@@ -698,11 +720,10 @@ static int spi_imx_setupxfer(struct spi_device *spi,
} else if (config.bpw <= 16) {
spi_imx->rx = spi_imx_buf_rx_u16;
spi_imx->tx = spi_imx_buf_tx_u16;
- } else if (config.bpw <= 32) {
+ } else {
spi_imx->rx = spi_imx_buf_rx_u32;
spi_imx->tx = spi_imx_buf_tx_u32;
- } else
- BUG();
+ }
spi_imx->devtype_data->config(spi_imx, &config);
@@ -719,7 +740,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
- init_completion(&spi_imx->xfer_done);
+ reinit_completion(&spi_imx->xfer_done);
spi_imx_push(spi_imx);
@@ -750,7 +771,36 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static int __devinit spi_imx_probe(struct platform_device *pdev)
+static int
+spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ return 0;
+}
+
+static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
@@ -760,7 +810,6 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- struct pinctrl *pinctrl;
int i, ret, num_cs;
if (!np && !mxc_platform_info) {
@@ -783,11 +832,12 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->bus_num = pdev->id;
master->num_chipselect = num_cs;
spi_imx = spi_master_get_devdata(master);
- spi_imx->bitbang.master = spi_master_get(master);
+ spi_imx->bitbang.master = master;
for (i = 0; i < master->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -798,10 +848,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
if (!gpio_is_valid(cs_gpio))
continue;
- ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
+ ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
+ DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev, "can't get cs gpios\n");
- goto out_gpio_free;
+ goto out_master_put;
}
}
@@ -810,6 +861,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
spi_imx->bitbang.master->setup = spi_imx_setup;
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
+ spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
+ spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
init_completion(&spi_imx->xfer_done);
@@ -818,56 +871,44 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
(struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't get platform resource\n");
- ret = -ENOMEM;
- goto out_gpio_free;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
- goto out_gpio_free;
- }
-
- spi_imx->base = ioremap(res->start, resource_size(res));
- if (!spi_imx->base) {
- ret = -EINVAL;
- goto out_release_mem;
+ spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_imx->base)) {
+ ret = PTR_ERR(spi_imx->base);
+ goto out_master_put;
}
spi_imx->irq = platform_get_irq(pdev, 0);
if (spi_imx->irq < 0) {
- ret = -EINVAL;
- goto out_iounmap;
+ ret = spi_imx->irq;
+ goto out_master_put;
}
- ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
+ ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+ dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
- goto out_iounmap;
- }
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto out_free_irq;
+ goto out_master_put;
}
spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(spi_imx->clk_ipg)) {
ret = PTR_ERR(spi_imx->clk_ipg);
- goto out_free_irq;
+ goto out_master_put;
}
spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(spi_imx->clk_per)) {
ret = PTR_ERR(spi_imx->clk_per);
- goto out_free_irq;
+ goto out_master_put;
}
- clk_prepare_enable(spi_imx->clk_per);
- clk_prepare_enable(spi_imx->clk_ipg);
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_master_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
@@ -884,53 +925,32 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "probed\n");
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
return ret;
out_clk_put:
- clk_disable_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
-out_free_irq:
- free_irq(spi_imx->irq, spi_imx);
-out_iounmap:
- iounmap(spi_imx->base);
-out_release_mem:
- release_mem_region(res->start, resource_size(res));
-out_gpio_free:
- while (--i >= 0) {
- if (gpio_is_valid(spi_imx->chipselect[i]))
- gpio_free(spi_imx->chipselect[i]);
- }
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
+out_master_put:
spi_master_put(master);
- kfree(master);
- platform_set_drvdata(pdev, NULL);
+
return ret;
}
-static int __devexit spi_imx_remove(struct platform_device *pdev)
+static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- int i;
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_per);
- clk_disable_unprepare(spi_imx->clk_ipg);
- free_irq(spi_imx->irq, spi_imx);
- iounmap(spi_imx->base);
-
- for (i = 0; i < master->num_chipselect; i++)
- if (gpio_is_valid(spi_imx->chipselect[i]))
- gpio_free(spi_imx->chipselect[i]);
-
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
spi_master_put(master);
- release_mem_region(res->start, resource_size(res));
-
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
@@ -942,10 +962,11 @@ static struct platform_driver spi_imx_driver = {
},
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
- .remove = __devexit_p(spi_imx_remove),
+ .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
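A note on the clock handling in the rework above: the sleepable clk_prepare_enable() calls now happen once in probe(), which then drops only the enable count, so the per-message prepare_message()/unprepare_message() hooks toggle nothing heavier than clk_enable()/clk_disable(). A kernel-style sketch of that split follows; it uses the real clk and spi_master APIs but is illustrative only, with a hypothetical demo_spi struct standing in for spi_imx_data, and it builds only against kernel headers.

/* Sketch of the "prepare once at probe, enable per message" clock pattern. */
#include <linux/clk.h>
#include <linux/spi/spi.h>

struct demo_spi {			/* hypothetical stand-in for spi_imx_data */
	struct clk *clk_per;
	struct clk *clk_ipg;
};

static int demo_probe_clocks(struct demo_spi *ds)
{
	int ret;

	ret = clk_prepare_enable(ds->clk_per);	/* may sleep: done once, at probe */
	if (ret)
		return ret;
	ret = clk_prepare_enable(ds->clk_ipg);
	if (ret) {
		clk_disable_unprepare(ds->clk_per);
		return ret;
	}

	/* ... query clk_get_rate(), set up and register the master ... */

	/* Leave the clocks prepared but gated while the bus is idle. */
	clk_disable(ds->clk_ipg);
	clk_disable(ds->clk_per);
	return 0;
}

static int demo_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct demo_spi *ds = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(ds->clk_per);		/* fast, non-sleeping enable */
	if (ret)
		return ret;
	ret = clk_enable(ds->clk_ipg);
	if (ret)
		clk_disable(ds->clk_per);
	return ret;
}

static int demo_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct demo_spi *ds = spi_master_get_devdata(master);

	clk_disable(ds->clk_ipg);
	clk_disable(ds->clk_per);
	return 0;
}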
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 0759b5db988..41c5765be74 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -222,7 +222,7 @@ static void spi_lm70llp_attach(struct parport *p)
/*
* SPI and bitbang hookup.
*/
- pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.master = master;
pp->bitbang.chipselect = lm70_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
pp->bitbang.flags = SPI_3WIRE;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 0a1e39e94d0..577d23a1276 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -16,40 +16,33 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
+#include <linux/gpio.h>
#include <asm/mpc52xx_psc.h>
struct mpc512x_psc_spi {
void (*cs_control)(struct spi_device *spi, bool on);
- u32 sysclk;
/* driver internal data */
struct mpc52xx_psc __iomem *psc;
struct mpc512x_psc_fifo __iomem *fifo;
unsigned int irq;
u8 bits_per_word;
- u8 busy;
- u32 mclk;
- u8 eofbyte;
+ struct clk *clk_mclk;
+ struct clk *clk_ipg;
+ u32 mclk_rate;
- struct workqueue_struct *workqueue;
- struct work_struct work;
-
- struct list_head queue;
- spinlock_t lock; /* Message queue lock */
-
- struct completion done;
+ struct completion txisrdone;
};
/* controller state */
@@ -81,6 +74,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
struct mpc52xx_psc __iomem *psc = mps->psc;
u32 sicr;
u32 ccr;
+ int speed;
u16 bclkdiv;
sicr = in_be32(&psc->sicr);
@@ -104,16 +98,16 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
ccr = in_be32(&psc->ccr);
ccr &= 0xFF000000;
- if (cs->speed_hz)
- bclkdiv = (mps->mclk / cs->speed_hz) - 1;
- else
- bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+ speed = cs->speed_hz;
+ if (!speed)
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(&psc->ccr, ccr);
mps->bits_per_word = cs->bits_per_word;
- if (mps->cs_control)
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
}
@@ -121,7 +115,7 @@ static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- if (mps->cs_control)
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
}
@@ -135,149 +129,225 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
- size_t len = t->len;
+ size_t tx_len = t->len;
+ size_t rx_len = t->len;
u8 *tx_buf = (u8 *)t->tx_buf;
u8 *rx_buf = (u8 *)t->rx_buf;
if (!tx_buf && !rx_buf && t->len)
return -EINVAL;
- /* Zero MR2 */
- in_8(&psc->mode);
- out_8(&psc->mode, 0x0);
-
- while (len) {
- int count;
- int i;
+ while (rx_len || tx_len) {
+ size_t txcount;
u8 data;
size_t fifosz;
- int rxcount;
+ size_t rxcount;
+ int rxtries;
/*
- * The number of bytes that can be sent at a time
- * depends on the fifo size.
+ * send the TX bytes in as large a chunk as possible
+ * but neither exceed the TX nor the RX FIFOs
*/
fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
- count = min(fifosz, len);
-
- for (i = count; i > 0; i--) {
- data = tx_buf ? *tx_buf++ : 0;
- if (len == EOFBYTE)
- setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
- out_8(&fifo->txdata_8, data);
- len--;
- }
-
- INIT_COMPLETION(mps->done);
-
- /* interrupt on tx fifo empty */
- out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
- out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
-
- /* enable transmiter/receiver */
- out_8(&psc->command,
- MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+ txcount = min(fifosz, tx_len);
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->rxsz));
+ fifosz -= in_be32(&fifo->rxcnt) + 1;
+ txcount = min(fifosz, txcount);
+ if (txcount) {
+
+ /* fill the TX FIFO */
+ while (txcount-- > 0) {
+ data = tx_buf ? *tx_buf++ : 0;
+ if (tx_len == EOFBYTE && t->cs_change)
+ setbits32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ tx_len--;
+ }
- wait_for_completion(&mps->done);
+ /* have the ISR trigger when the TX FIFO is empty */
+ reinit_completion(&mps->txisrdone);
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
+ wait_for_completion(&mps->txisrdone);
+ }
- mdelay(1);
+ /*
+ * consume as much RX data as the FIFO holds, while we
+ * iterate over the transfer's TX data length
+ *
+ * only insist on draining all the remaining RX bytes
+ * when the TX bytes were exhausted (that's at the very
+ * end of this transfer, not when still iterating over
+ * the transfer's chunks)
+ */
+ rxtries = 50;
+ do {
+
+ /*
+ * grab whatever was in the FIFO when we started
+ * looking, don't bother fetching what was added to
+ * the FIFO while we read from it -- we'll return
+ * here eventually and prefer sending out remaining
+ * TX data
+ */
+ fifosz = in_be32(&fifo->rxcnt);
+ rxcount = min(fifosz, rx_len);
+ while (rxcount-- > 0) {
+ data = in_8(&fifo->rxdata_8);
+ if (rx_buf)
+ *rx_buf++ = data;
+ rx_len--;
+ }
- /* rx fifo should have count bytes in it */
- rxcount = in_be32(&fifo->rxcnt);
- if (rxcount != count)
- mdelay(1);
+ /*
+ * come back later if there still is TX data to send,
+ * bail out of the RX drain loop if all of the TX data
+ * was sent and all of the RX data was received (i.e.
+ * when the transmission has completed)
+ */
+ if (tx_len)
+ break;
+ if (!rx_len)
+ break;
- rxcount = in_be32(&fifo->rxcnt);
- if (rxcount != count) {
- dev_warn(&spi->dev, "expected %d bytes in rx fifo "
- "but got %d\n", count, rxcount);
+ /*
+ * TX data transmission has completed while RX data
+ * is still pending -- that's a transient situation
+ * which depends on wire speed and specific
+ * hardware implementation details (buffering) yet
+ * should resolve very quickly
+ *
+ * just yield for a moment to not hog the CPU for
+ * too long when running SPI at low speed
+ *
+ * the timeout range is rather arbitrary and tries
+ * to balance throughput against system load; the
+ * chosen values result in a minimal timeout of 50
+ * times 10us and thus work at speeds as low as
+ * some 20kbps, while the maximum timeout at the
+ * transfer's end could be 5ms _if_ nothing else
+ * ticks in the system _and_ RX data still wasn't
+ * received, which only occurs in situations that
+ * are exceptional; removing the unpredictability
+ * of the timeout either decreases throughput
+ * (longer timeouts), or puts more load on the
+ * system (fixed short timeouts) or requires the
+ * use of a timeout API instead of a counter and an
+ * unknown inner delay
+ */
+ usleep_range(10, 100);
+
+ } while (--rxtries > 0);
+ if (!tx_len && rx_len && !rxtries) {
+ /*
+ * not enough RX bytes even after several retries
+ * and the resulting rather long timeout?
+ */
+ rxcount = in_be32(&fifo->rxcnt);
+ dev_warn(&spi->dev,
+ "short xfer, missing %zd RX bytes, FIFO level %zd\n",
+ rx_len, rxcount);
}
- rxcount = min(rxcount, count);
- for (i = rxcount; i > 0; i--) {
- data = in_8(&fifo->rxdata_8);
- if (rx_buf)
- *rx_buf++ = data;
- }
- while (in_be32(&fifo->rxcnt)) {
- in_8(&fifo->rxdata_8);
+ /*
+ * drain and drop RX data which "should not be there" in
+ * the first place, for undisturbed transmission this turns
+ * into a NOP (except for the FIFO level fetch)
+ */
+ if (!tx_len && !rx_len) {
+ while (in_be32(&fifo->rxcnt))
+ in_8(&fifo->rxdata_8);
}
- out_8(&psc->command,
- MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
}
- /* disable transmiter/receiver and fifo interrupt */
- out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
- out_be32(&fifo->tximr, 0);
return 0;
}
-static void mpc512x_psc_spi_work(struct work_struct *work)
+static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
+ struct spi_message *m)
{
- struct mpc512x_psc_spi *mps = container_of(work,
- struct mpc512x_psc_spi,
- work);
-
- spin_lock_irq(&mps->lock);
- mps->busy = 1;
- while (!list_empty(&mps->queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- unsigned cs_change;
- int status;
-
- m = container_of(mps->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irq(&mps->lock);
-
- spi = m->spi;
- cs_change = 1;
- status = 0;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- status = mpc512x_psc_spi_transfer_setup(spi, t);
- if (status < 0)
- break;
- }
+ struct spi_device *spi;
+ unsigned cs_change;
+ int status;
+ struct spi_transfer *t;
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
- if (cs_change)
- mpc512x_psc_spi_activate_cs(spi);
- cs_change = t->cs_change;
+ if (cs_change)
+ mpc512x_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
- status = mpc512x_psc_spi_transfer_rxtx(spi, t);
- if (status)
- break;
- m->actual_length += t->len;
+ status = mpc512x_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
- if (cs_change)
- mpc512x_psc_spi_deactivate_cs(spi);
- }
+ if (cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+ }
- m->status = status;
+ m->status = status;
+ if (m->complete)
m->complete(m->context);
- if (status || !cs_change)
- mpc512x_psc_spi_deactivate_cs(spi);
+ if (status || !cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
- mpc512x_psc_spi_transfer_setup(spi, NULL);
+ mpc512x_psc_spi_transfer_setup(spi, NULL);
- spin_lock_irq(&mps->lock);
- }
- mps->busy = 0;
- spin_unlock_irq(&mps->lock);
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* Zero MR2 */
+ in_8(&psc->mode);
+ out_8(&psc->mode, 0x0);
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* disable transmitter/receiver and fifo interrupt */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_be32(&fifo->tximr, 0);
+
+ return 0;
}
static int mpc512x_psc_spi_setup(struct spi_device *spi)
{
- struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
- unsigned long flags;
+ int ret;
if (spi->bits_per_word % 8)
return -EINVAL;
@@ -286,39 +356,32 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "can't get CS gpio: %d\n",
+ ret);
+ kfree(cs);
+ return ret;
+ }
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+
spi->controller_state = cs;
}
cs->bits_per_word = spi->bits_per_word;
cs->speed_hz = spi->max_speed_hz;
- spin_lock_irqsave(&mps->lock, flags);
- if (!mps->busy)
- mpc512x_psc_spi_deactivate_cs(spi);
- spin_unlock_irqrestore(&mps->lock, flags);
-
- return 0;
-}
-
-static int mpc512x_psc_spi_transfer(struct spi_device *spi,
- struct spi_message *m)
-{
- struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- spin_lock_irqsave(&mps->lock, flags);
- list_add_tail(&m->queue, &mps->queue);
- queue_work(mps->workqueue, &mps->work);
- spin_unlock_irqrestore(&mps->lock, flags);
-
return 0;
}
static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
{
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
kfree(spi->controller_state);
}
@@ -327,19 +390,11 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
{
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
- struct clk *spiclk;
- int ret = 0;
- char name[32];
u32 sicr;
u32 ccr;
+ int speed;
u16 bclkdiv;
- sprintf(name, "psc%d_mclk", master->bus_num);
- spiclk = clk_get(&master->dev, name);
- clk_enable(spiclk);
- mps->mclk = clk_get_rate(spiclk);
- clk_put(spiclk);
-
/* Reset the PSC into a known state */
out_8(&psc->command, MPC52xx_PSC_RST_RX);
out_8(&psc->command, MPC52xx_PSC_RST_TX);
@@ -366,7 +421,8 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
ccr = in_be32(&psc->ccr);
ccr &= 0xFF000000;
- bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(&psc->ccr, ccr);
@@ -386,7 +442,7 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
mps->bits_per_word = 8;
- return ret;
+ return 0;
}
static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
@@ -394,27 +450,31 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
- /* clear interrupt and wake up the work queue */
+ /* clear interrupt and wake up the rx/tx routine */
if (in_be32(&fifo->txisr) &
in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
out_be32(&fifo->tximr, 0);
- complete(&mps->done);
+ complete(&mps->txisrdone);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
-/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
- u32 size, unsigned int irq,
- s16 bus_num)
+static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
+{
+ gpio_set_value(spi->cs_gpio, onoff);
+}
+
+static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc512x_psc_spi *mps;
struct spi_master *master;
int ret;
void *tempp;
+ struct clk *clk;
master = spi_alloc_master(dev, sizeof *mps);
if (master == NULL)
@@ -425,25 +485,22 @@ static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->irq = irq;
if (pdata == NULL) {
- dev_err(dev, "probe called without platform data, no "
- "cs_control function will be called\n");
- mps->cs_control = NULL;
- mps->sysclk = 0;
- master->bus_num = bus_num;
- master->num_chipselect = 255;
+ mps->cs_control = mpc512x_spi_cs_control;
} else {
mps->cs_control = pdata->cs_control;
- mps->sysclk = pdata->sysclk;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
}
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->setup = mpc512x_psc_spi_setup;
- master->transfer = mpc512x_psc_spi_transfer;
+ master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
+ master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
+ master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
master->cleanup = mpc512x_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
- tempp = ioremap(regaddr, size);
+ tempp = devm_ioremap(dev, regaddr, size);
if (!tempp) {
dev_err(dev, "could not ioremap I/O port range\n");
ret = -EFAULT;
@@ -452,67 +509,68 @@ static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->psc = tempp;
mps->fifo =
(struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
+ ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
+ if (ret)
+ goto free_master;
+ init_completion(&mps->txisrdone);
- ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
- "mpc512x-psc-spi", mps);
+ clk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_master;
+ }
+ ret = clk_prepare_enable(clk);
if (ret)
goto free_master;
+ mps->clk_mclk = clk;
+ mps->mclk_rate = clk_get_rate(clk);
+
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_mclk_clock;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_mclk_clock;
+ mps->clk_ipg = clk;
ret = mpc512x_psc_spi_port_config(master, mps);
if (ret < 0)
- goto free_irq;
-
- spin_lock_init(&mps->lock);
- init_completion(&mps->done);
- INIT_WORK(&mps->work, mpc512x_psc_spi_work);
- INIT_LIST_HEAD(&mps->queue);
-
- mps->workqueue =
- create_singlethread_workqueue(dev_name(master->dev.parent));
- if (mps->workqueue == NULL) {
- ret = -EBUSY;
- goto free_irq;
- }
+ goto free_ipg_clock;
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret < 0)
- goto unreg_master;
+ goto free_ipg_clock;
return ret;
-unreg_master:
- destroy_workqueue(mps->workqueue);
-free_irq:
- free_irq(mps->irq, mps);
+free_ipg_clock:
+ clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
+ clk_disable_unprepare(mps->clk_mclk);
free_master:
- if (mps->psc)
- iounmap(mps->psc);
spi_master_put(master);
return ret;
}
-static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
+static int mpc512x_psc_spi_do_remove(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
- flush_workqueue(mps->workqueue);
- destroy_workqueue(mps->workqueue);
- spi_unregister_master(master);
- free_irq(mps->irq, mps);
- if (mps->psc)
- iounmap(mps->psc);
- spi_master_put(master);
+ clk_disable_unprepare(mps->clk_mclk);
+ clk_disable_unprepare(mps->clk_ipg);
return 0;
}
-static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
+static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
- s16 id = -1;
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
@@ -521,25 +579,11 @@ static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
}
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
- /* get PSC id (0..11, used by port_config) */
- if (op->dev.platform_data == NULL) {
- const u32 *psc_nump;
-
- psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
- if (!psc_nump || *psc_nump > 11) {
- dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
- "has invalid cell-index property\n",
- op->dev.of_node->full_name);
- return -EINVAL;
- }
- id = *psc_nump;
- }
-
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
- irq_of_parse_and_map(op->dev.of_node, 0), id);
+ irq_of_parse_and_map(op->dev.of_node, 0));
}
-static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op)
+static int mpc512x_psc_spi_of_remove(struct platform_device *op)
{
return mpc512x_psc_spi_do_remove(&op->dev);
}
@@ -553,7 +597,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
static struct platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
- .remove = __devexit_p(mpc512x_psc_spi_of_remove),
+ .remove = mpc512x_psc_spi_of_remove,
.driver = {
.name = "mpc512x-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index bd47d262d53..de532aa11d3 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -248,7 +247,8 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
}
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
if (status || !cs_change)
mpc52xx_psc_spi_deactivate_cs(spi);
@@ -363,10 +363,10 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq, s16 bus_num)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc52xx_psc_spi *mps;
struct spi_master *master;
int ret;
@@ -383,8 +383,8 @@ static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->irq = irq;
if (pdata == NULL) {
- dev_warn(dev, "probe called without platform data, no "
- "cs_control function will be called\n");
+ dev_warn(dev,
+ "probe called without platform data, no cs_control function will be called\n");
mps->cs_control = NULL;
mps->sysclk = 0;
master->bus_num = bus_num;
@@ -450,7 +450,7 @@ free_master:
return ret;
}
-static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
+static int mpc52xx_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -479,9 +479,9 @@ static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
+static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
flush_workqueue(mps->workqueue);
@@ -505,7 +505,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
static struct platform_driver mpc52xx_psc_spi_of_driver = {
.probe = mpc52xx_psc_spi_of_probe,
- .remove = __devexit_p(mpc52xx_psc_spi_of_remove),
+ .remove = mpc52xx_psc_spi_of_remove,
.driver = {
.name = "mpc52xx-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 04541065021..b07db4b62d8 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
@@ -235,7 +234,8 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
dev_err(&ms->master->dev, "mode fault\n");
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = -EIO;
- ms->message->complete(ms->message->context);
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
@@ -289,7 +289,8 @@ mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
ms->msg_count++;
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = 0;
- ms->message->complete(ms->message->context);
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
@@ -357,20 +358,6 @@ static void mpc52xx_spi_wq(struct work_struct *work)
* spi_master ops
*/
-static int mpc52xx_spi_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word % 8)
- return -EINVAL;
-
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
- return -EINVAL;
-
- if (spi->chip_select >= spi->master->num_chipselect)
- return -EINVAL;
-
- return 0;
-}
-
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
@@ -390,7 +377,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
/*
* OF Platform Bus Binding
*/
-static int __devinit mpc52xx_spi_probe(struct platform_device *op)
+static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
@@ -433,12 +420,12 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = op->dev.of_node;
- dev_set_drvdata(&op->dev, master);
+ platform_set_drvdata(op, master);
ms = spi_master_get_devdata(master);
ms->master = master;
@@ -527,9 +514,9 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
return rc;
}
-static int __devexit mpc52xx_spi_remove(struct platform_device *op)
+static int mpc52xx_spi_remove(struct platform_device *op)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
int i;
@@ -547,7 +534,7 @@ static int __devexit mpc52xx_spi_remove(struct platform_device *op)
return 0;
}
-static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
+static const struct of_device_id mpc52xx_spi_match[] = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
@@ -560,6 +547,6 @@ static struct platform_driver mpc52xx_spi_of_driver = {
.of_match_table = mpc52xx_spi_match,
},
.probe = mpc52xx_spi_probe,
- .remove = __devexit_p(mpc52xx_spi_remove),
+ .remove = mpc52xx_spi_remove,
};
module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index edf1360ab09..2884f0c2f5f 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -29,7 +29,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -46,7 +45,6 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
@@ -58,45 +56,53 @@
#define SG_MAXLEN 0xff00
+/*
+ * Flags for txrx functions. More efficient than using an argument register for
+ * each one.
+ */
+#define TXRX_WRITE (1<<0) /* This is a write */
+#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
+
struct mxs_spi {
struct mxs_ssp ssp;
struct completion c;
+ unsigned int sck; /* Rate requested (vs actual) */
};
static int mxs_spi_setup_transfer(struct spi_device *dev,
- struct spi_transfer *t)
+ const struct spi_transfer *t)
{
struct mxs_spi *spi = spi_master_get_devdata(dev->master);
struct mxs_ssp *ssp = &spi->ssp;
- uint8_t bits_per_word;
- uint32_t hz = 0;
+ const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
- bits_per_word = dev->bits_per_word;
- if (t && t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
- if (bits_per_word != 8) {
- dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
+ if (hz == 0) {
+ dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
return -EINVAL;
}
- hz = dev->max_speed_hz;
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
- if (hz == 0) {
- dev_err(&dev->dev, "Cannot continue with zero clock\n");
- return -EINVAL;
+ if (hz != spi->sck) {
+ mxs_ssp_set_clk_rate(ssp, hz);
+ /*
+ * Save requested rate, hz, rather than the actual rate,
+ * ssp->clk_rate. Otherwise we would set the rate every transfer
+ * when the actual rate is not quite the same as the requested rate.
+ */
+ spi->sck = hz;
+ /*
+ * Perhaps we should return an error if the actual clock is
+ * nowhere close to what was requested?
+ */
}
- mxs_ssp_set_clk_rate(ssp, hz);
+ writel(BM_SSP_CTRL0_LOCK_CS,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
- BF_SSP_CTRL1_WORD_LENGTH
- (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
- ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
- ssp->base + HW_SSP_CTRL1(ssp));
+ BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+ ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+ ssp->base + HW_SSP_CTRL1(ssp));
writel(0x0, ssp->base + HW_SSP_CMD0);
writel(0x0, ssp->base + HW_SSP_CMD1);
@@ -104,28 +110,9 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
return 0;
}
-static int mxs_spi_setup(struct spi_device *dev)
+static u32 mxs_spi_cs_to_reg(unsigned cs)
{
- int err = 0;
-
- if (!dev->bits_per_word)
- dev->bits_per_word = 8;
-
- if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
- return -EINVAL;
-
- err = mxs_spi_setup_transfer(dev, NULL);
- if (err) {
- dev_err(&dev->dev,
- "Failed to setup transfer, error = %d\n", err);
- }
-
- return err;
-}
-
-static uint32_t mxs_spi_cs_to_reg(unsigned cs)
-{
- uint32_t select = 0;
+ u32 select = 0;
/*
* i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
@@ -143,43 +130,11 @@ static uint32_t mxs_spi_cs_to_reg(unsigned cs)
return select;
}
-static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
-{
- const uint32_t mask =
- BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
- uint32_t select;
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- select = mxs_spi_cs_to_reg(cs);
- writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
-static inline void mxs_spi_enable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
-}
-
-static inline void mxs_spi_disable(struct mxs_spi *spi)
-{
- struct mxs_ssp *ssp = &spi->ssp;
-
- writel(BM_SSP_CTRL0_LOCK_CS,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
- writel(BM_SSP_CTRL0_IGNORE_CRC,
- ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
-}
-
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
struct mxs_ssp *ssp = &spi->ssp;
- uint32_t reg;
+ u32 reg;
do {
reg = readl_relaxed(ssp->base + offset);
@@ -212,9 +167,9 @@ static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_dma(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
struct dma_async_tx_descriptor *desc = NULL;
@@ -223,11 +178,11 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
const int sgs = DIV_ROUND_UP(len, desc_len);
int sg_count;
int min, ret;
- uint32_t ctrl0;
+ u32 ctrl0;
struct page *vm_page;
void *sg_buf;
struct {
- uint32_t pio[4];
+ u32 pio[4];
struct scatterlist sg;
} *dma_xfer;
@@ -238,26 +193,33 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
if (!dma_xfer)
return -ENOMEM;
- INIT_COMPLETION(spi->c);
+ reinit_completion(&spi->c);
+ /* Chip select was already programmed into CTRL0 */
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
- ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+ ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
+ BM_SSP_CTRL0_READ);
+ ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
- if (*first)
- ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
- if (!write)
+ if (!(flags & TXRX_WRITE))
ctrl0 |= BM_SSP_CTRL0_READ;
/* Queue the DMA data transfer. */
for (sg_count = 0; sg_count < sgs; sg_count++) {
+ /* Prepare the transfer descriptor. */
min = min(len, desc_len);
- /* Prepare the transfer descriptor. */
- if ((sg_count + 1 == sgs) && *last)
+ /*
+ * De-assert CS on last segment if flag is set (i.e., no more
+ * transfers will follow)
+ */
+ if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
- if (ssp->devid == IMX23_SSP)
+ if (ssp->devid == IMX23_SSP) {
+ ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= min;
+ }
dma_xfer[sg_count].pio[0] = ctrl0;
dma_xfer[sg_count].pio[3] = min;
@@ -276,7 +238,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
len -= min;
buf += min;
@@ -296,7 +258,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
- write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
@@ -323,6 +285,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
if (!ret) {
dev_err(ssp->dev, "DMA transfer timeout\n");
ret = -ETIMEDOUT;
+ dmaengine_terminate_all(ssp->dmach);
goto err_vmalloc;
}
@@ -332,7 +295,7 @@ err_vmalloc:
while (--sg_count >= 0) {
err_mapped:
dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
kfree(dma_xfer);
@@ -340,20 +303,19 @@ err_mapped:
return ret;
}
-static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
+static int mxs_spi_txrx_pio(struct mxs_spi *spi,
unsigned char *buf, int len,
- int *first, int *last, int write)
+ unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
- if (*first)
- mxs_spi_enable(spi);
-
- mxs_spi_set_cs(spi, cs);
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
while (len--) {
- if (*last && len == 0)
- mxs_spi_disable(spi);
+ if (len == 0 && (flags & TXRX_DEASSERT_CS))
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (ssp->devid == IMX23_SSP) {
writel(BM_SSP_CTRL0_XFER_COUNT,
@@ -364,7 +326,7 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
writel(1, ssp->base + HW_SSP_XFER_SIZE);
}
- if (write)
+ if (flags & TXRX_WRITE)
writel(BM_SSP_CTRL0_READ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
else
@@ -377,13 +339,13 @@ static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
return -ETIMEDOUT;
- if (write)
+ if (flags & TXRX_WRITE)
writel(*buf, ssp->base + HW_SSP_DATA(ssp));
writel(BM_SSP_CTRL0_DATA_XFER,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- if (!write) {
+ if (!(flags & TXRX_WRITE)) {
if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
BM_SSP_STATUS_FIFO_EMPTY, 0))
return -ETIMEDOUT;
@@ -408,31 +370,25 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- int first, last;
- struct spi_transfer *t, *tmp_t;
+ struct spi_transfer *t;
+ unsigned int flag;
int status = 0;
- int cs;
- first = last = 0;
+ /* Program CS register bits here; they will be used for all transfers. */
+ writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(mxs_spi_cs_to_reg(m->spi->chip_select),
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- cs = m->spi->chip_select;
-
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
break;
- if (&t->transfer_list == m->transfers.next)
- first = 1;
- if (&t->transfer_list == m->transfers.prev)
- last = 1;
- if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
- dev_err(ssp->dev,
- "Cannot send and receive simultaneously\n");
- status = -EINVAL;
- break;
- }
+ /* De-assert on last transfer, inverted by cs_change flag */
+ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
+ TXRX_DEASSERT_CS : 0;
/*
* Small blocks can be transfered via PIO.
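The flag computed a few lines up, (is-last-transfer) XOR cs_change, decides when TXRX_DEASSERT_CS is passed to the txrx helpers: chip select is normally released only after the final transfer of a message, and a transfer's cs_change bit inverts that in either direction (drop CS early in the middle of a message, or keep it asserted past the end). A tiny stand-alone sketch of the resulting truth table:

/* Sketch of the "last transfer XOR cs_change" chip-select decision above. */
#include <stdio.h>

#define TXRX_DEASSERT_CS	(1 << 1)	/* same value as in the driver */

static unsigned int cs_flag(int is_last, int cs_change)
{
	return (is_last ^ cs_change) ? TXRX_DEASSERT_CS : 0;
}

int main(void)
{
	int is_last, cs_change;

	for (is_last = 0; is_last <= 1; is_last++)
		for (cs_change = 0; cs_change <= 1; cs_change++)
			printf("last=%d cs_change=%d -> %s\n",
			       is_last, cs_change,
			       cs_flag(is_last, cs_change) ?
			       "de-assert CS" : "keep CS asserted");
	return 0;
}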
@@ -449,26 +405,26 @@ static int mxs_spi_transfer_one(struct spi_master *master,
STMP_OFFSET_REG_CLR);
if (t->tx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
(void *)t->tx_buf,
- t->len, &first, &last, 1);
+ t->len, flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_pio(spi, cs,
+ status = mxs_spi_txrx_pio(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
} else {
writel(BM_SSP_CTRL1_DMA_ENABLE,
ssp->base + HW_SSP_CTRL1(ssp) +
STMP_OFFSET_REG_SET);
if (t->tx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
(void *)t->tx_buf, t->len,
- &first, &last, 1);
+ flag | TXRX_WRITE);
if (t->rx_buf)
- status = mxs_spi_txrx_dma(spi, cs,
+ status = mxs_spi_txrx_dma(spi,
t->rx_buf, t->len,
- &first, &last, 0);
+ flag);
}
if (status) {
@@ -477,30 +433,14 @@ static int mxs_spi_transfer_one(struct spi_master *master,
}
m->actual_length += t->len;
- first = last = 0;
}
- m->status = 0;
+ m->status = status;
spi_finalize_current_message(master);
return status;
}
-static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
-{
- struct mxs_ssp *ssp = param;
-
- if (!mxs_dma_is_apbh(chan))
- return false;
-
- if (chan->chan_id != ssp->dma_channel)
- return false;
-
- chan->private = &ssp->dma_data;
-
- return true;
-}
-
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -508,7 +448,7 @@ static const struct of_device_id mxs_spi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
-static int __devinit mxs_spi_probe(struct platform_device *pdev)
+static int mxs_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_spi_dt_ids, &pdev->dev);
@@ -516,13 +456,11 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
- struct resource *iores, *dmares;
- struct pinctrl *pinctrl;
+ struct resource *iores;
struct clk *clk;
void __iomem *base;
- int devid, dma_channel, clk_freq;
- int ret = 0, irq_err, irq_dma;
- dma_cap_mask_t mask;
+ int devid, clk_freq;
+ int ret = 0, irq_err;
/*
* Default clock speed for the SPI core. 160MHz seems to
@@ -533,55 +471,29 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
- irq_dma = platform_get_irq(pdev, 1);
- if (!iores || irq_err < 0 || irq_dma < 0)
- return -EINVAL;
-
- base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!base)
- return -EADDRNOTAVAIL;
+ if (irq_err < 0)
+ return irq_err;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- return PTR_ERR(pinctrl);
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- if (np) {
- devid = (enum mxs_ssp_id) of_id->data;
- /*
- * TODO: This is a temporary solution and should be changed
- * to use generic DMA binding later when the helpers get in.
- */
- ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
- &dma_channel);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to get DMA channel\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32(np, "clock-frequency",
- &clk_freq);
- if (ret)
- clk_freq = clk_freq_default;
- } else {
- dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmares)
- return -EINVAL;
- devid = pdev->id_entry->driver_data;
- dma_channel = dmares->start;
+ devid = (enum mxs_ssp_id) of_id->data;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &clk_freq);
+ if (ret)
clk_freq = clk_freq_default;
- }
master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (!master)
return -ENOMEM;
master->transfer_one_message = mxs_spi_transfer_one;
- master->setup = mxs_spi_setup;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
@@ -593,7 +505,6 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
ssp->clk = clk;
ssp->base = base;
ssp->devid = devid;
- ssp->dma_channel = dma_channel;
init_completion(&spi->c);
@@ -602,63 +513,61 @@ static int __devinit mxs_spi_probe(struct platform_device *pdev)
if (ret)
goto out_master_free;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- ssp->dma_data.chan_irq = irq_dma;
- ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
+ ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(ssp->dev, "Failed to request DMA\n");
+ ret = -ENODEV;
goto out_master_free;
}
- clk_prepare_enable(ssp->clk);
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ goto out_dma_release;
+
clk_set_rate(ssp->clk, clk_freq);
- ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
- stmp_reset_block(ssp->base);
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ goto out_disable_clk;
platform_set_drvdata(pdev, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_free_dma;
+ goto out_disable_clk;
}
return 0;
-out_free_dma:
- dma_release_channel(ssp->dmach);
+out_disable_clk:
clk_disable_unprepare(ssp->clk);
+out_dma_release:
+ dma_release_channel(ssp->dmach);
out_master_free:
spi_master_put(master);
return ret;
}
-static int __devexit mxs_spi_remove(struct platform_device *pdev)
+static int mxs_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
- master = spi_master_get(platform_get_drvdata(pdev));
+ master = platform_get_drvdata(pdev);
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- spi_unregister_master(master);
-
- dma_release_channel(ssp->dmach);
-
clk_disable_unprepare(ssp->clk);
-
- spi_master_put(master);
+ dma_release_channel(ssp->dmach);
return 0;
}
static struct platform_driver mxs_spi_driver = {
.probe = mxs_spi_probe,
- .remove = __devexit_p(mxs_spi_remove),
+ .remove = mxs_spi_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
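
The rewritten spi-mxs transfer loop above replaces the old first/last bookkeeping with a single flag computed per transfer: chip select is de-asserted after the final transfer unless that transfer's cs_change bit inverts the decision. A minimal user-space C sketch of that truth table follows; the TXRX_* values here are placeholders for illustration, not the driver's real definitions.

	#include <stdbool.h>
	#include <stdio.h>

	#define TXRX_WRITE		(1 << 0)	/* placeholder value */
	#define TXRX_DEASSERT_CS	(1 << 1)	/* placeholder value */

	/* Mirror of the per-transfer flag computation above: de-assert CS
	 * after the last transfer, unless cs_change flips the decision. */
	static unsigned int cs_flag(bool is_last, bool cs_change)
	{
		return (is_last ^ cs_change) ? TXRX_DEASSERT_CS : 0;
	}

	int main(void)
	{
		for (int last = 0; last <= 1; last++)
			for (int chg = 0; chg <= 1; chg++)
				printf("last=%d cs_change=%d -> %s\n", last, chg,
				       cs_flag(last, chg) ? "de-assert" : "keep asserted");
		return 0;
	}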
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index a6eca6ffdab..73e91d5a43d 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -9,9 +9,7 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -38,7 +36,9 @@
/* usi register bit */
#define ENINT (0x01 << 17)
#define ENFLG (0x01 << 16)
+#define SLEEP (0x0f << 12)
#define TXNUM (0x03 << 8)
+#define TXBITLEN (0x1f << 3)
#define TXNEG (0x01 << 2)
#define RXNEG (0x01 << 1)
#define LSB (0x01 << 10)
@@ -57,13 +57,9 @@ struct nuc900_spi {
const unsigned char *tx;
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
- struct spi_device *curdev;
- struct device *dev;
struct nuc900_spi_info *pdata;
spinlock_t lock;
- struct resource *res;
};
static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
@@ -120,19 +116,16 @@ static void nuc900_spi_chipsel(struct spi_device *spi, int value)
}
}
-static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
- unsigned int txnum)
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
- if (!txnum)
- val &= ~TXNUM;
- else
+ if (txnum)
val |= txnum << 0x08;
__raw_writel(val, hw->regs + USI_CNT);
@@ -149,7 +142,7 @@ static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
val |= (txbitlen << 0x03);
@@ -174,17 +167,6 @@ static void nuc900_spi_gobusy(struct nuc900_spi *hw)
spin_unlock_irqrestore(&hw->lock, flags);
}
-static int nuc900_spi_setupxfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- return 0;
-}
-
-static int nuc900_spi_setup(struct spi_device *spi)
-{
- return 0;
-}
-
static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
{
return hw->tx ? hw->tx[count] : 0;
@@ -299,12 +281,11 @@ static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
if (sleep)
val |= (sleep << 12);
- else
- val &= ~(0x0f << 12);
+
__raw_writel(val, hw->regs + USI_CNT);
spin_unlock_irqrestore(&hw->lock, flags);
@@ -346,23 +327,22 @@ static void nuc900_init_spi(struct nuc900_spi *hw)
nuc900_enable_int(hw);
}
-static int __devinit nuc900_spi_probe(struct platform_device *pdev)
+static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
+ struct resource *res;
int err = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
- hw->pdata = pdev->dev.platform_data;
- hw->dev = &pdev->dev;
+ hw->master = master;
+ hw->pdata = dev_get_platdata(&pdev->dev);
if (hw->pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -373,56 +353,41 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
init_completion(&hw->done);
- master->mode_bits = SPI_MODE_0;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
- hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
hw->bitbang.chipselect = nuc900_spi_chipsel;
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
- hw->bitbang.master->setup = nuc900_spi_setup;
- hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (hw->res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
goto err_pdata;
}
- hw->ioarea = request_mem_region(hw->res->start,
- resource_size(hw->res), pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
- goto err_pdata;
- }
-
- hw->regs = ioremap(hw->res->start, resource_size(hw->res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_iomap;
- }
-
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_irq;
+ goto err_pdata;
}
- err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_irq;
+ goto err_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_clk;
+ goto err_pdata;
}
mfp_set_groupg(&pdev->dev, NULL);
@@ -438,46 +403,24 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
err_register:
clk_disable(hw->clk);
- clk_put(hw->clk);
-err_clk:
- free_irq(hw->irq, hw);
-err_irq:
- iounmap(hw->regs);
-err_iomap:
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
err_pdata:
spi_master_put(hw->master);
-
-err_nomem:
return err;
}
-static int __devexit nuc900_spi_remove(struct platform_device *dev)
+static int nuc900_spi_remove(struct platform_device *dev)
{
struct nuc900_spi *hw = platform_get_drvdata(dev);
- free_irq(hw->irq, hw);
-
- platform_set_drvdata(dev, NULL);
-
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- iounmap(hw->regs);
-
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
static struct platform_driver nuc900_spi_driver = {
.probe = nuc900_spi_probe,
- .remove = __devexit_p(nuc900_spi_remove),
+ .remove = nuc900_spi_remove,
.driver = {
.name = "nuc900-spi",
.owner = THIS_MODULE,
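
The nuc900 hunks above switch the USI_CNT updates to a clear-then-set pattern, so a previously programmed TXNUM, TXBITLEN or SLEEP value cannot leak into the new configuration. A small stand-alone C sketch of that read-modify-write helper, reusing the mask values from the driver (the register is only modelled as a plain variable here):

	#include <stdint.h>
	#include <stdio.h>

	#define TXNUM		(0x03u << 8)
	#define TXBITLEN	(0x1fu << 3)
	#define SLEEP		(0x0fu << 12)

	/* Clear the field first, then OR in the new value, so an earlier
	 * setting cannot survive the update. */
	static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | (val & mask);
	}

	int main(void)
	{
		uint32_t cnt = 0xffffffffu;			/* worst case: all bits set */

		cnt = set_field(cnt, TXBITLEN, 8u << 3);	/* 8-bit words */
		cnt = set_field(cnt, TXNUM, 0);			/* one word per burst */
		cnt = set_field(cnt, SLEEP, 0);			/* no inter-word sleep */
		printf("USI_CNT shadow: 0x%08x\n", (unsigned int)cnt);
		return 0;
	}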
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 9d9071b730b..8998d11c723 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -54,7 +53,7 @@ struct tiny_spi {
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- unsigned int gpio_cs_count;
+ int gpio_cs_count;
int *gpio_cs;
};
@@ -74,7 +73,7 @@ static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
- if (hw->gpio_cs_count) {
+ if (hw->gpio_cs_count > 0) {
gpio_set_value(hw->gpio_cs[spi->chip_select],
(spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
@@ -153,62 +152,22 @@ static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}
wait_for_completion(&hw->done);
- } else if (txp && rxp) {
- /* we need to tighten the transfer loop */
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx, tx = *txp++;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (rxp) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0,
- hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (txp) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 tx = *txp++;
- tiny_spi_wait_txr(hw);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- }
- }
- tiny_spi_wait_txe(hw);
} else {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
+ /* we need to tighten the transfer loop */
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+ for (i = 1; i < t->len; i++) {
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+
+ if (rxp || (i != t->len - 1))
tiny_spi_wait_txr(hw);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- }
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
}
tiny_spi_wait_txe(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
}
+
return t->len;
}
@@ -243,7 +202,7 @@ static irqreturn_t tiny_spi_irq(int irq, void *dev)
#ifdef CONFIG_OF
#include <linux/of_gpio.h>
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -254,7 +213,7 @@ static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
if (!np)
return 0;
hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count) {
+ if (hw->gpio_cs_count > 0) {
hw->gpio_cs = devm_kzalloc(&pdev->dev,
hw->gpio_cs_count * sizeof(unsigned int),
GFP_KERNEL);
@@ -277,15 +236,15 @@ static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
return 0;
}
#else /* !CONFIG_OF */
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
return 0;
}
#endif /* CONFIG_OF */
-static int __devinit tiny_spi_probe(struct platform_device *pdev)
+static int tiny_spi_probe(struct platform_device *pdev)
{
- struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
+ struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -306,24 +265,18 @@ static int __devinit tiny_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = spi_master_get(master);
- if (!hw->bitbang.master)
- return err;
+ hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!hw->base)
- goto exit_busy;
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
@@ -337,8 +290,10 @@ static int __devinit tiny_spi_probe(struct platform_device *pdev)
if (platp) {
hw->gpio_cs_count = platp->gpio_cs_count;
hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs)
- goto exit_busy;
+ if (platp->gpio_cs_count && !platp->gpio_cs) {
+ err = -EBUSY;
+ goto exit;
+ }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
@@ -352,7 +307,7 @@ static int __devinit tiny_spi_probe(struct platform_device *pdev)
goto exit_gpio;
gpio_direction_output(hw->gpio_cs[i], 1);
}
- hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count);
+ hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -365,15 +320,12 @@ static int __devinit tiny_spi_probe(struct platform_device *pdev)
exit_gpio:
while (i-- > 0)
gpio_free(hw->gpio_cs[i]);
-exit_busy:
- err = -EBUSY;
exit:
- platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return err;
}
-static int __devexit tiny_spi_remove(struct platform_device *pdev)
+static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
@@ -382,7 +334,6 @@ static int __devexit tiny_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&hw->bitbang);
for (i = 0; i < hw->gpio_cs_count; i++)
gpio_free(hw->gpio_cs[i]);
- platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return 0;
}
@@ -393,18 +344,16 @@ static const struct of_device_id tiny_spi_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, tiny_spi_match);
-#else /* CONFIG_OF */
-#define tiny_spi_match NULL
#endif /* CONFIG_OF */
static struct platform_driver tiny_spi_driver = {
.probe = tiny_spi_probe,
- .remove = __devexit_p(tiny_spi_remove),
+ .remove = tiny_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.pm = NULL,
- .of_match_table = tiny_spi_match,
+ .of_match_table = of_match_ptr(tiny_spi_match),
},
};
module_platform_driver(tiny_spi_driver);
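
The spi-oc-tiny rework above collapses four near-identical PIO loops (duplex, rx-only, tx-only and dummy-write) into one loop that always writes the next tx byte (or zero) and reads back only when a receive buffer exists. A simplified user-space model of that control flow follows; the pipelining against the TXR/TXE status bits is deliberately left out:

	#include <stdio.h>

	/* The "bus" just echoes the last byte written, standing in for the
	 * controller's shift register. */
	static unsigned char bus;

	static void xfer(const unsigned char *txp, unsigned char *rxp, int len)
	{
		for (int i = 0; i < len; i++) {
			bus = txp ? txp[i] : 0;		/* zero is a dummy write when rx-only */
			if (rxp)
				rxp[i] = bus;		/* capture only if a buffer exists */
		}
	}

	int main(void)
	{
		unsigned char tx[4] = { 0xde, 0xad, 0xbe, 0xef }, rx[4] = { 0 };

		xfer(tx, rx, 4);	/* full duplex   */
		xfer(tx, NULL, 4);	/* transmit only */
		xfer(NULL, rx, 4);	/* receive only  */
		printf("rx[0] = 0x%02x\n", rx[0]);
		return 0;
	}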
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index ea8fb2efb0f..c5e2f718eeb 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -11,7 +11,6 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -28,19 +27,11 @@
#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
struct octeon_spi {
- struct spi_master *my_master;
u64 register_base;
u64 last_cfg;
u64 cs_enax;
};
-struct octeon_spi_setup {
- u32 max_speed_hz;
- u8 chip_select;
- u8 mode;
- u8 bits_per_word;
-};
-
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
union cvmx_mpi_sts mpi_sts;
@@ -58,33 +49,23 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
struct spi_transfer *xfer,
bool last_xfer)
{
+ struct spi_device *spi = msg->spi;
union cvmx_mpi_cfg mpi_cfg;
union cvmx_mpi_tx mpi_tx;
unsigned int clkdiv;
unsigned int speed_hz;
int mode;
bool cpha, cpol;
- int bits_per_word;
const u8 *tx_buf;
u8 *rx_buf;
int len;
int i;
- struct octeon_spi_setup *msg_setup = spi_get_ctldata(msg->spi);
-
- speed_hz = msg_setup->max_speed_hz;
- mode = msg_setup->mode;
+ mode = spi->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- bits_per_word = msg_setup->bits_per_word;
- if (xfer->speed_hz)
- speed_hz = xfer->speed_hz;
- if (xfer->bits_per_word)
- bits_per_word = xfer->bits_per_word;
-
- if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
- speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
@@ -98,8 +79,8 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
mpi_cfg.s.cslate = cpha ? 1 : 0;
mpi_cfg.s.enable = 1;
- if (msg_setup->chip_select < 4)
- p->cs_enax |= 1ull << (12 + msg_setup->chip_select);
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
mpi_cfg.u64 |= p->cs_enax;
if (mpi_cfg.u64 != p->last_cfg) {
@@ -119,7 +100,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
mpi_tx.s.leavecs = 1;
mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
@@ -144,7 +125,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
if (last_xfer)
mpi_tx.s.leavecs = xfer->cs_change;
else
@@ -166,19 +147,6 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
return xfer->len;
}
-static int octeon_spi_validate_bpw(struct spi_device *spi, u32 speed)
-{
- switch (speed) {
- case 8:
- break;
- default:
- dev_err(&spi->dev, "Error: %d bits per word not supported\n",
- speed);
- return -EINVAL;
- }
- return 0;
-}
-
static int octeon_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -187,26 +155,9 @@ static int octeon_spi_transfer_one_message(struct spi_master *master,
int status = 0;
struct spi_transfer *xfer;
- /*
- * We better have set the configuration via a call to .setup
- * before we get here.
- */
- if (spi_get_ctldata(msg->spi) == NULL) {
- status = -EINVAL;
- goto err;
- }
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->bits_per_word) {
- status = octeon_spi_validate_bpw(msg->spi,
- xfer->bits_per_word);
- if (status)
- goto err;
- }
- }
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
status = r;
@@ -221,54 +172,8 @@ err:
return status;
}
-static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
+static int octeon_spi_probe(struct platform_device *pdev)
{
- struct octeon_spi_setup *setup = kzalloc(sizeof(*setup), GFP_KERNEL);
- if (!setup)
- return NULL;
-
- setup->max_speed_hz = spi->max_speed_hz;
- setup->chip_select = spi->chip_select;
- setup->mode = spi->mode;
- setup->bits_per_word = spi->bits_per_word;
- return setup;
-}
-
-static int octeon_spi_setup(struct spi_device *spi)
-{
- int r;
- struct octeon_spi_setup *new_setup;
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
-
- r = octeon_spi_validate_bpw(spi, spi->bits_per_word);
- if (r)
- return r;
-
- new_setup = octeon_spi_new_setup(spi);
- if (!new_setup)
- return -ENOMEM;
-
- spi_set_ctldata(spi, new_setup);
- kfree(old_setup);
-
- return 0;
-}
-
-static void octeon_spi_cleanup(struct spi_device *spi)
-{
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
- spi_set_ctldata(spi, NULL);
- kfree(old_setup);
-}
-
-static int octeon_spi_nop_transfer_hardware(struct spi_master *master)
-{
- return 0;
-}
-
-static int __devinit octeon_spi_probe(struct platform_device *pdev)
-{
-
struct resource *res_mem;
struct spi_master *master;
struct octeon_spi *p;
@@ -278,8 +183,7 @@ static int __devinit octeon_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
p = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, p);
- p->my_master = master;
+ platform_set_drvdata(pdev, master);
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -296,8 +200,6 @@ static int __devinit octeon_spi_probe(struct platform_device *pdev)
p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
- /* Dynamic bus numbering */
- master->bus_num = -1;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA |
SPI_CPOL |
@@ -305,14 +207,12 @@ static int __devinit octeon_spi_probe(struct platform_device *pdev)
SPI_LSB_FIRST |
SPI_3WIRE;
- master->setup = octeon_spi_setup;
- master->cleanup = octeon_spi_cleanup;
- master->prepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
master->transfer_one_message = octeon_spi_transfer_one_message;
- master->unprepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
master->dev.of_node = pdev->dev.of_node;
- err = spi_register_master(master);
+ err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "register master failed: %d\n", err);
goto fail;
@@ -326,13 +226,12 @@ fail:
return err;
}
-static int __devexit octeon_spi_remove(struct platform_device *pdev)
+static int octeon_spi_remove(struct platform_device *pdev)
{
- struct octeon_spi *p = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_master_get_devdata(master);
u64 register_base = p->register_base;
- spi_unregister_master(p->my_master);
-
/* Clear the CSENA* and put everything in a known state. */
cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
@@ -352,7 +251,7 @@ static struct platform_driver octeon_spi_driver = {
.of_match_table = octeon_spi_match,
},
.probe = octeon_spi_probe,
- .remove = __devexit_p(octeon_spi_remove),
+ .remove = octeon_spi_remove,
};
module_platform_driver(octeon_spi_driver);
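
With the per-device octeon_spi_setup bookkeeping gone, the transfer path above reads mode and chip_select straight from the spi_device and picks the clock per transfer with xfer->speed_hz ?: spi->max_speed_hz; the local clamp is dropped on the assumption that the core now enforces the advertised master->max_speed_hz and bits_per_word_mask. A small sketch of that divider calculation (the 800 MHz I/O clock is only an example value, standing in for octeon_get_io_clock_rate()):

	#include <stdio.h>

	/* Per-transfer clock selection as in the hunk above: a transfer may
	 * override the device default; clamping to the controller maximum
	 * is assumed to happen in the SPI core. */
	static unsigned int clkdiv_for(unsigned int xfer_hz, unsigned int dev_hz,
				       unsigned long io_clock_hz)
	{
		unsigned int speed_hz = xfer_hz ? xfer_hz : dev_hz;

		return io_clock_hz / (2 * speed_hz);
	}

	int main(void)
	{
		printf("clkdiv = %u\n", clkdiv_for(0, 16000000, 800000000UL));
		printf("clkdiv = %u\n", clkdiv_for(1000000, 16000000, 800000000UL));
		return 0;
	}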
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index dfb4b7f448c..e7ffcded4e1 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -83,20 +83,11 @@
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
- struct work_struct work;
-
- /* lock protects queue and registers */
- spinlock_t lock;
- struct list_head msg_queue;
- struct spi_master *master;
struct clk *ick;
struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
-
- /* State of the SPI */
- unsigned int state;
};
struct omap1_spi100k_cs {
@@ -104,15 +95,6 @@ struct omap1_spi100k_cs {
int word_len;
};
-static struct workqueue_struct *omap1_spi100k_wq;
-
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) \
- val |= mask; \
- else \
- val &= ~mask; \
-} while (0)
-
static void spi100k_enable_clock(struct spi_master *master)
{
unsigned int val;
@@ -146,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
}
spi100k_enable_clock(master);
- writew( data , spi100k->base + SPI_TX_MSB);
+ writew(data , spi100k->base + SPI_TX_MSB);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
@@ -154,7 +136,8 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
spi100k->base + SPI_CTRL);
/* Wait for bit ack send change */
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
udelay(1000);
spi100k_disable_clock(master);
@@ -162,7 +145,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH,dataL;
+ int dataH, dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -175,7 +158,8 @@ static int spi100k_read_data(struct spi_master *master, int len)
SPI_CTRL_RD,
spi100k->base + SPI_CTRL);
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
@@ -211,12 +195,10 @@ static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
static unsigned
omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
unsigned int count, c;
int word_len;
- spi100k = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -228,12 +210,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=1;
+ c -= 1;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
- } while(c);
+ } while (c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
@@ -241,12 +223,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=2;
+ c -= 2;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx++);
+ spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
@@ -254,12 +236,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=4;
+ c -= 4;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx);
+ spi100k_write_data(spi->master, word_len, *tx);
if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx = spi100k_read_data(spi->master, word_len);
+ } while (c);
}
return count - c;
}
@@ -298,16 +280,10 @@ static int omap1_spi100k_setup(struct spi_device *spi)
struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
- if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
- dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
- spi->bits_per_word);
- return -EINVAL;
- }
-
spi100k = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = spi100k->base + spi->chip_select * 0x14;
@@ -316,177 +292,106 @@ static int omap1_spi100k_setup(struct spi_device *spi)
spi100k_open(spi->master);
- clk_enable(spi100k->ick);
- clk_enable(spi100k->fck);
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
ret = omap1_spi100k_setup_transfer(spi, NULL);
- clk_disable(spi100k->ick);
- clk_disable(spi100k->fck);
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
return ret;
}
-static void omap1_spi100k_work(struct work_struct *work)
+static int omap1_spi100k_prepare_hardware(struct spi_master *master)
{
- struct omap1_spi100k *spi100k;
- int status = 0;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- spi100k = container_of(work, struct omap1_spi100k, work);
- spin_lock_irq(&spi100k->lock);
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
- clk_enable(spi100k->ick);
- clk_enable(spi100k->fck);
+ return 0;
+}
- /* We only enable one channel at a time -- the one whose message is
- * at the head of the queue -- although this controller would gladly
- * arbitrate among multiple channels. This corresponds to "single
- * channel" master mode. As a side effect, we need to manage the
- * chipselect with the FORCE bit ... CS != channel enable.
- */
- while (!list_empty(&spi100k->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int cs_active = 0;
- struct omap1_spi100k_cs *cs;
- int par_override = 0;
-
- m = container_of(spi100k->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
- spin_unlock_irq(&spi100k->lock);
-
- spi = m->spi;
- cs = spi->controller_state;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
+static int omap1_spi100k_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ int par_override = 0;
+ int status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
break;
- }
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = omap1_spi100k_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
- if (!cs_active) {
- omap1_spi100k_force_cs(spi100k, 1);
- cs_active = 1;
- }
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
- if (t->len) {
- unsigned count;
+ if (t->len) {
+ unsigned count;
- count = omap1_spi100k_txrx_pio(spi, t);
- m->actual_length += count;
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
- if (count != t->len) {
- status = -EIO;
- break;
- }
+ if (count != t->len) {
+ status = -EIO;
+ break;
}
+ }
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- /* ignore the "leave it on after last xfer" hint */
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
- if (t->cs_change) {
- omap1_spi100k_force_cs(spi100k, 0);
- cs_active = 0;
- }
- }
+ /* ignore the "leave it on after last xfer" hint */
- /* Restore defaults if they were overriden */
- if (par_override) {
- par_override = 0;
- status = omap1_spi100k_setup_transfer(spi, NULL);
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
}
+ }
- if (cs_active)
- omap1_spi100k_force_cs(spi100k, 0);
+	/* Restore defaults if they were overridden */

+ if (par_override) {
+ par_override = 0;
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+ }
- m->status = status;
- m->complete(m->context);
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
- spin_lock_irq(&spi100k->lock);
- }
+ m->status = status;
- clk_disable(spi100k->ick);
- clk_disable(spi100k->fck);
- spin_unlock_irq(&spi100k->lock);
+ spi_finalize_current_message(master);
- if (status < 0)
- printk(KERN_WARNING "spi transfer failed with %d\n", status);
+ return status;
}
-static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
+static int omap1_spi100k_unprepare_hardware(struct spi_master *master)
{
- struct omap1_spi100k *spi100k;
- unsigned long flags;
- struct spi_transfer *t;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- spi100k = spi_master_get_devdata(spi->master);
-
- /* Don't accept new work if we're shutting down */
- if (spi100k->state == SPI_SHUTDOWN)
- return -ESHUTDOWN;
-
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers) || !m->complete)
- return -EINVAL;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- const void *tx_buf = t->tx_buf;
- void *rx_buf = t->rx_buf;
- unsigned len = t->len;
-
- if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
- || (len && !(rx_buf || tx_buf))
- || (t->bits_per_word &&
- ( t->bits_per_word < 4
- || t->bits_per_word > 32))) {
- dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
- t->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- t->bits_per_word);
- return -EINVAL;
- }
-
- if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
- dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
- t->speed_hz,
- OMAP1_SPI100K_MAX_FREQ/(1<<16));
- return -EINVAL;
- }
-
- }
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- spin_lock_irqsave(&spi100k->lock, flags);
- list_add_tail(&m->queue, &spi100k->msg_queue);
- queue_work(omap1_spi100k_wq, &spi100k->work);
- spin_unlock_irqrestore(&spi100k->lock, flags);
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
return 0;
}
-static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
-{
- return 0;
-}
-
-static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
+static int omap1_spi100k_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
@@ -495,141 +400,72 @@ static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
if (!pdev->id)
return -EINVAL;
- master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
- master->bus_num = pdev->id;
+ master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
- master->transfer = omap1_spi100k_transfer;
+ master->transfer_one_message = omap1_spi100k_transfer_one_message;
+ master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware;
+ master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware;
master->cleanup = NULL;
master->num_chipselect = 2;
master->mode_bits = MODEBITS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
+ master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
- dev_set_drvdata(&pdev->dev, master);
+ platform_set_drvdata(pdev, master);
spi100k = spi_master_get_devdata(master);
- spi100k->master = master;
/*
* The memory region base address is taken as the platform_data.
* You should allocate this with ioremap() before initializing
* the SPI.
*/
- spi100k->base = (void __iomem *) pdev->dev.platform_data;
-
- INIT_WORK(&spi100k->work, omap1_spi100k_work);
+ spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev);
- spin_lock_init(&spi100k->lock);
- INIT_LIST_HEAD(&spi100k->msg_queue);
- spi100k->ick = clk_get(&pdev->dev, "ick");
+ spi100k->ick = devm_clk_get(&pdev->dev, "ick");
if (IS_ERR(spi100k->ick)) {
dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
status = PTR_ERR(spi100k->ick);
- goto err1;
+ goto err;
}
- spi100k->fck = clk_get(&pdev->dev, "fck");
+ spi100k->fck = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(spi100k->fck)) {
dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
status = PTR_ERR(spi100k->fck);
- goto err2;
+ goto err;
}
- if (omap1_spi100k_reset(spi100k) < 0)
- goto err3;
-
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
- goto err3;
-
- spi100k->state = SPI_RUNNING;
+ goto err;
return status;
-err3:
- clk_put(spi100k->fck);
-err2:
- clk_put(spi100k->ick);
-err1:
+err:
spi_master_put(master);
return status;
}
-static int __exit omap1_spi100k_remove(struct platform_device *pdev)
-{
- struct spi_master *master;
- struct omap1_spi100k *spi100k;
- struct resource *r;
- unsigned limit = 500;
- unsigned long flags;
- int status = 0;
-
- master = dev_get_drvdata(&pdev->dev);
- spi100k = spi_master_get_devdata(master);
-
- spin_lock_irqsave(&spi100k->lock, flags);
-
- spi100k->state = SPI_SHUTDOWN;
- while (!list_empty(&spi100k->msg_queue) && limit--) {
- spin_unlock_irqrestore(&spi100k->lock, flags);
- msleep(10);
- spin_lock_irqsave(&spi100k->lock, flags);
- }
-
- if (!list_empty(&spi100k->msg_queue))
- status = -EBUSY;
-
- spin_unlock_irqrestore(&spi100k->lock, flags);
-
- if (status != 0)
- return status;
-
- clk_put(spi100k->fck);
- clk_put(spi100k->ick);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
- .remove = __exit_p(omap1_spi100k_remove),
+ .probe = omap1_spi100k_probe,
};
-
-static int __init omap1_spi100k_init(void)
-{
- omap1_spi100k_wq = create_singlethread_workqueue(
- omap1_spi100k_driver.driver.name);
-
- if (omap1_spi100k_wq == NULL)
- return -1;
-
- return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
-}
-
-static void __exit omap1_spi100k_exit(void)
-{
- platform_driver_unregister(&omap1_spi100k_driver);
-
- destroy_workqueue(omap1_spi100k_wq);
-}
-
-module_init(omap1_spi100k_init);
-module_exit(omap1_spi100k_exit);
+module_platform_driver(omap1_spi100k_driver);
MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
MODULE_LICENSE("GPL");
-
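
The omap1_spi100k conversion above drops the private workqueue, lock and message list in favour of the SPI core's message pump: the core calls prepare_transfer_hardware() before a burst of messages, transfer_one_message() for each message, and unprepare_transfer_hardware() when the queue drains, which is where the clock enable/disable pairs now live. A rough user-space model of that calling sequence follows; this is not the core's real code path, just the ordering it guarantees:

	#include <stdio.h>

	struct master {
		int (*prepare)(void);
		int (*transfer_one_message)(int msg);
		int (*unprepare)(void);
	};

	static int prepare(void)     { printf("clk on\n");     return 0; }
	static int unprepare(void)   { printf("clk off\n");    return 0; }
	static int xfer_one(int msg) { printf("msg %d\n", msg); return 0; }

	/* The core keeps the hardware prepared across a run of queued
	 * messages and only unprepares once the queue is empty. */
	static void pump(struct master *m, int nmsgs)
	{
		m->prepare();
		for (int i = 0; i < nmsgs; i++)
			m->transfer_one_message(i);
		m->unprepare();
	}

	int main(void)
	{
		struct master m = { prepare, xfer_one, unprepare };

		pump(&m, 3);
		return 0;
	}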
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 0a94d9dc9c3..0f5a0aa3b87 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -37,7 +37,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -99,7 +98,6 @@ struct uwire_spi {
};
struct uwire_state {
- unsigned bits_per_word;
unsigned div1_idx;
};
@@ -210,9 +208,8 @@ static void uwire_chipselect(struct spi_device *spi, int value)
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
- struct uwire_state *ust = spi->controller_state;
unsigned len = t->len;
- unsigned bits = ust->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
@@ -220,10 +217,6 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
if (!t->tx_buf && !t->rx_buf)
return 0;
- /* Microwire doesn't read and write concurrently */
- if (t->tx_buf && t->rx_buf)
- return -EPERM;
-
w = spi->chip_select << 10;
w |= CS_CMD;
@@ -322,7 +315,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
struct uwire_state *ust = spi->controller_state;
struct uwire_spi *uwire;
unsigned flags = 0;
- unsigned bits;
unsigned hz;
unsigned long rate;
int div1_idx;
@@ -332,23 +324,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
uwire = spi_master_get_devdata(spi->master);
- if (spi->chip_select > 3) {
- pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select);
- status = -ENODEV;
- goto done;
- }
-
- bits = spi->bits_per_word;
- if (t != NULL && t->bits_per_word)
- bits = t->bits_per_word;
-
- if (bits > 16) {
- pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
- status = -ENODEV;
- goto done;
- }
- ust->bits_per_word = bits;
-
/* mode 0..3, clock inverted separately;
* standard nCS signaling;
* don't treat DI=high as "not ready"
@@ -476,7 +451,7 @@ static void uwire_off(struct uwire_spi *uwire)
spi_master_put(uwire->bitbang.master);
}
-static int __init uwire_probe(struct platform_device *pdev)
+static int uwire_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct uwire_spi *uwire;
@@ -495,13 +470,14 @@ static int __init uwire_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dev_set_drvdata(&pdev->dev, uwire);
+ platform_set_drvdata(pdev, uwire);
uwire->ck = clk_get(&pdev->dev, "fck");
if (IS_ERR(uwire->ck)) {
status = PTR_ERR(uwire->ck);
dev_dbg(&pdev->dev, "no functional clock?\n");
spi_master_put(master);
+ iounmap(uwire_base);
return status;
}
clk_enable(uwire->ck);
@@ -515,7 +491,7 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
@@ -536,17 +512,16 @@ static int __init uwire_probe(struct platform_device *pdev)
return status;
}
-static int __exit uwire_remove(struct platform_device *pdev)
+static int uwire_remove(struct platform_device *pdev)
{
- struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev);
- int status;
+ struct uwire_spi *uwire = platform_get_drvdata(pdev);
// FIXME remove all child devices, somewhere ...
- status = spi_bitbang_stop(&uwire->bitbang);
+ spi_bitbang_stop(&uwire->bitbang);
uwire_off(uwire);
iounmap(uwire_base);
- return status;
+ return 0;
}
/* work with hotplug and coldplug */
@@ -557,7 +532,8 @@ static struct platform_driver uwire_driver = {
.name = "omap_uwire",
.owner = THIS_MODULE,
},
- .remove = __exit_p(uwire_remove),
+ .probe = uwire_probe,
+ .remove = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
@@ -579,7 +555,7 @@ static int __init omap_uwire_init(void)
omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
}
- return platform_driver_probe(&uwire_driver, uwire_probe);
+ return platform_driver_register(&uwire_driver);
}
static void __exit omap_uwire_exit(void)
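
In the uwire changes above, the open-coded word-size check moves into master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16), letting the core reject unsupported widths before the driver sees the transfer. A stand-alone sketch of how such a mask encodes the supported range; the macros are re-derived here for illustration only, the kernel ships its own:

	#include <stdio.h>

	/* Bit (n-1) set means n-bit words are supported. */
	#define BPW_MASK(bits)		(1u << ((bits) - 1))
	#define BPW_RANGE_MASK(lo, hi)	((BPW_MASK(hi) - BPW_MASK(lo)) + BPW_MASK(hi))

	static int bpw_supported(unsigned int mask, unsigned int bits)
	{
		return bits >= 1 && bits <= 32 && (mask & BPW_MASK(bits));
	}

	int main(void)
	{
		unsigned int mask = BPW_RANGE_MASK(1, 16);	/* as set for uwire */

		printf("8-bit:  %d\n", bpw_supported(mask, 8));
		printf("16-bit: %d\n", bpw_supported(mask, 16));
		printf("24-bit: %d\n", bpw_supported(mask, 24));
		return 0;
	}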
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3542fdc664b..4dc77df3886 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -22,7 +22,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -38,14 +37,16 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/err.h>
+#include <linux/gcd.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH 64
+#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT 2000
#define OMAP2_MCSPI_REVISION 0x00
@@ -55,6 +56,7 @@
#define OMAP2_MCSPI_WAKEUPENABLE 0x20
#define OMAP2_MCSPI_SYST 0x24
#define OMAP2_MCSPI_MODULCTRL 0x28
+#define OMAP2_MCSPI_XFERLEVEL 0x7c
/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0 0x2c
@@ -64,6 +66,7 @@
#define OMAP2_MCSPI_RX0 0x3c
/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
@@ -84,12 +87,17 @@
#define OMAP2_MCSPI_CHCONF_IS BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
@@ -103,6 +111,9 @@ struct omap2_mcspi_dma {
struct completion dma_tx_completion;
struct completion dma_rx_completion;
+
+ char dma_rx_ch_name[14];
+ char dma_tx_ch_name[14];
};
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
@@ -130,6 +141,8 @@ struct omap2_mcspi {
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
struct omap2_mcspi_regs ctx;
+ int fifo_depth;
+ unsigned int pin_dir:1;
};
struct omap2_mcspi_cs {
@@ -138,7 +151,7 @@ struct omap2_mcspi_cs {
int word_len;
struct list_head node;
/* Context save and restore shadow register */
- u32 chconf0;
+ u32 chconf0, chctrl0;
};
static inline void mcspi_write_reg(struct spi_master *master,
@@ -146,14 +159,14 @@ static inline void mcspi_write_reg(struct spi_master *master,
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- __raw_writel(val, mcspi->base + idx);
+ writel_relaxed(val, mcspi->base + idx);
}
static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- return __raw_readl(mcspi->base + idx);
+ return readl_relaxed(mcspi->base + idx);
}
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
@@ -161,14 +174,14 @@ static inline void mcspi_write_cs_reg(const struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- __raw_writel(val, cs->base + idx);
+ writel_relaxed(val, cs->base + idx);
}
static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- return __raw_readl(cs->base + idx);
+ return readl_relaxed(cs->base + idx);
}
static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
@@ -187,6 +200,16 @@ static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
+static inline int mcspi_bytes_per_word(int word_len)
+{
+ if (word_len <= 8)
+ return 1;
+ else if (word_len <= 16)
+ return 2;
+ else /* word_len <= 32 */
+ return 4;
+}
+
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
int is_read, int enable)
{
@@ -209,10 +232,16 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
u32 l;
- l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
/* Flash post-writes */
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
@@ -248,6 +277,64 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master)
ctx->modulctrl = l;
}
+static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ struct spi_transfer *t, int enable)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ unsigned int wcnt;
+ int max_fifo_depth, fifo_depth, bytes_per_word;
+ u32 chconf, xferlevel;
+
+ mcspi = spi_master_get_devdata(master);
+
+ chconf = mcspi_cached_chconf0(spi);
+ if (enable) {
+ bytes_per_word = mcspi_bytes_per_word(cs->word_len);
+ if (t->len % bytes_per_word != 0)
+ goto disable_fifo;
+
+ if (t->rx_buf != NULL && t->tx_buf != NULL)
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+ else
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
+
+ fifo_depth = gcd(t->len, max_fifo_depth);
+ if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
+ goto disable_fifo;
+
+ wcnt = t->len / bytes_per_word;
+ if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
+ goto disable_fifo;
+
+ xferlevel = wcnt << 16;
+ if (t->rx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFER;
+ xferlevel |= (fifo_depth - 1) << 8;
+ }
+ if (t->tx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFET;
+ xferlevel |= fifo_depth - 1;
+ }
+
+ mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = fifo_depth;
+
+ return;
+ }
+
+disable_fifo:
+ if (t->rx_buf != NULL)
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
+ else
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
+
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = 0;
+}
+
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
struct spi_master *spi_cntrl = mcspi->master;
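
omap2_mcspi_set_fifo() above enables the hardware FIFO only when a depth can be found that divides the transfer length evenly, stays word-aligned, and fits the 64-byte FIFO (halved for full-duplex transfers); gcd() yields the largest such divisor. A stand-alone sketch of that selection rule; the XFERLEVEL word-count cap is left out for brevity:

	#include <stdio.h>

	static unsigned int gcd_u(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;
			a = b;
			b = t;
		}
		return a;
	}

	/* Returns the chosen FIFO depth in bytes, or 0 to fall back to the
	 * non-FIFO path, mirroring the disable_fifo branch above. */
	static unsigned int pick_fifo_depth(unsigned int len,
					    unsigned int bytes_per_word,
					    int full_duplex)
	{
		unsigned int max_depth = full_duplex ? 64 / 2 : 64;
		unsigned int depth;

		if (len % bytes_per_word)
			return 0;
		depth = gcd_u(len, max_depth);
		if (depth < 2 || depth % bytes_per_word)
			return 0;
		return depth;
	}

	int main(void)
	{
		printf("%u\n", pick_fifo_depth(160, 2, 0));	/* 32 */
		printf("%u\n", pick_fifo_depth(10, 4, 0));	/* 0: not word aligned */
		printf("%u\n", pick_fifo_depth(96, 4, 1));	/* 32 */
		return 0;
	}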
@@ -259,24 +346,7 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
list_for_each_entry(cs, &ctx->cs, node)
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
-}
-
-static int omap2_prepare_transfer(struct spi_master *master)
-{
- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(mcspi->dev);
- return 0;
-}
-
-static int omap2_unprepare_transfer(struct spi_master *master)
-{
- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-
- pm_runtime_mark_last_busy(mcspi->dev);
- pm_runtime_put_autosuspend(mcspi->dev);
- return 0;
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -284,9 +354,13 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
- while (!(__raw_readl(reg) & bit)) {
- if (time_after(jiffies, timeout))
- return -1;
+ while (!(readl_relaxed(reg) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (!(readl_relaxed(reg) & bit))
+ return -ETIMEDOUT;
+ else
+ return 0;
+ }
cpu_relax();
}
return 0;
@@ -298,10 +372,10 @@ static void omap2_mcspi_rx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- complete(&mcspi_dma->dma_rx_completion);
-
/* We must disable the DMA RX request */
omap2_mcspi_set_dma_req(spi, 1, 0);
+
+ complete(&mcspi_dma->dma_rx_completion);
}
static void omap2_mcspi_tx_callback(void *data)
@@ -310,10 +384,10 @@ static void omap2_mcspi_tx_callback(void *data)
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- complete(&mcspi_dma->dma_tx_completion);
-
/* We must disable the DMA TX request */
omap2_mcspi_set_dma_req(spi, 0, 0);
+
+ complete(&mcspi_dma->dma_tx_completion);
}
static void omap2_mcspi_tx_dma(struct spi_device *spi,
@@ -323,19 +397,11 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
- u8 * rx;
- const u8 * tx;
- void __iomem *chstat_reg;
- struct omap2_mcspi_cs *cs = spi->controller_state;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
-
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
@@ -359,19 +425,6 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
- wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
- DMA_TO_DEVICE);
-
- /* for TX_ONLY mode, be sure all words have shifted out */
- if (rx == NULL) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXS) < 0)
- dev_err(&spi->dev, "TXS timed out\n");
- else if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0)
- dev_err(&spi->dev, "EOT timed out\n");
- }
}
static unsigned
@@ -381,7 +434,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count;
+ unsigned int count, dma_count;
u32 l;
int elements = 0;
int word_len, element_count;
@@ -389,6 +442,11 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
+ dma_count = xfer->len;
+
+ if (mcspi->fifo_depth == 0)
+ dma_count -= es;
+
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);
@@ -402,16 +460,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (mcspi_dma->dma_rx) {
struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
- size_t len = xfer->len - es;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
- if (l & OMAP2_MCSPI_CHCONF_TURBO)
- len -= es;
+ if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
+ dma_count -= es;
sg_init_table(&sg, 1);
sg_dma_address(&sg) = xfer->rx_dma;
- sg_dma_len(&sg) = len;
+ sg_dma_len(&sg) = dma_count;
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
@@ -431,6 +488,10 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
wait_for_completion(&mcspi_dma->dma_rx_completion);
dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
DMA_FROM_DEVICE);
+
+ if (mcspi->fifo_depth > 0)
+ return count;
+
omap2_mcspi_set_enable(spi, 0);
elements = element_count - 1;
@@ -450,10 +511,9 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements++] = w;
} else {
- dev_err(&spi->dev, "DMA RX penultimate word empty");
- count -= (word_len <= 8) ? 2 :
- (word_len <= 16) ? 4 :
- /* word_len <= 32 */ 8;
+ int bytes_per_word = mcspi_bytes_per_word(word_len);
+ dev_err(&spi->dev, "DMA RX penultimate word empty\n");
+ count -= (bytes_per_word << 1);
omap2_mcspi_set_enable(spi, 1);
return count;
}
@@ -470,10 +530,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements] = w;
} else {
- dev_err(&spi->dev, "DMA RX last word empty");
- count -= (word_len <= 8) ? 1 :
- (word_len <= 16) ? 2 :
- /* word_len <= 32 */ 4;
+ dev_err(&spi->dev, "DMA RX last word empty\n");
+ count -= mcspi_bytes_per_word(word_len);
}
omap2_mcspi_set_enable(spi, 1);
return count;
@@ -492,6 +550,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
+ u32 burst;
+ void __iomem *chstat_reg;
+ void __iomem *irqstat_reg;
+ int wait_res;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -509,25 +571,69 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
es = 4;
}
+ count = xfer->len;
+ burst = 1;
+
+ if (mcspi->fifo_depth > 0) {
+ if (count > mcspi->fifo_depth)
+ burst = mcspi->fifo_depth / es;
+ else
+ burst = count / es;
+ }
+
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
- cfg.src_maxburst = 1;
- cfg.dst_maxburst = 1;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
- count = xfer->len;
-
if (tx != NULL)
omap2_mcspi_tx_dma(spi, xfer, cfg);
if (rx != NULL)
- return omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+ count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
+ if (tx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+
+ if (mcspi->fifo_depth > 0) {
+ irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
+
+ if (mcspi_wait_for_reg_bit(irqstat_reg,
+ OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ dev_err(&spi->dev, "EOW timed out\n");
+
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (rx == NULL) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (mcspi->fifo_depth > 0) {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXFFE);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXFFE timed out\n");
+ } else {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
+ }
+ if (wait_res >= 0 &&
+ (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0))
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+ }
return count;
}
@@ -577,7 +683,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %02x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -589,7 +695,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 1 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -603,7 +709,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
@@ -624,7 +730,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %04x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -636,7 +742,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 2 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -650,7 +756,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
@@ -671,7 +777,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %08x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -683,7 +789,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 4 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -697,7 +803,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
@@ -742,7 +848,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
struct spi_master *spi_cntrl;
- u32 l = 0, div = 0;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
@@ -758,15 +864,32 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
speed_hz = t->speed_hz;
speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
- div = omap2_mcspi_calc_divisor(speed_hz);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
l = mcspi_cached_chconf0(spi);
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
*/
- l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
- l |= OMAP2_MCSPI_CHCONF_DPE0;
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ l &= ~OMAP2_MCSPI_CHCONF_IS;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ } else {
+ l |= OMAP2_MCSPI_CHCONF_IS;
+ l |= OMAP2_MCSPI_CHCONF_DPE1;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ }
/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
@@ -780,7 +903,16 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
- l |= div << 2;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
@@ -795,13 +927,17 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ >> div,
+ speed_hz,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
return 0;
}
+/*
+ * Note that we currently allow DMA only if we get a channel
+ * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
+ */
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
struct spi_master *master = spi->master;
@@ -819,22 +955,31 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
sig = mcspi_dma->dma_rx_sync_dev;
- mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
- if (!mcspi_dma->dma_rx) {
- dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
- return -EAGAIN;
- }
+
+ mcspi_dma->dma_rx =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &sig, &master->dev,
+ mcspi_dma->dma_rx_ch_name);
+ if (!mcspi_dma->dma_rx)
+ goto no_dma;
sig = mcspi_dma->dma_tx_sync_dev;
- mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ mcspi_dma->dma_tx =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &sig, &master->dev,
+ mcspi_dma->dma_tx_ch_name);
+
if (!mcspi_dma->dma_tx) {
- dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
- return -EAGAIN;
+ goto no_dma;
}
return 0;
+
+no_dma:
+ dev_warn(&spi->dev, "not using DMA for McSPI\n");
+ return -EAGAIN;
}
static int omap2_mcspi_setup(struct spi_device *spi)
@@ -845,12 +990,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
- if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
- dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
- spi->bits_per_word);
- return -EINVAL;
- }
-
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
@@ -860,6 +999,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->base = mcspi->base + spi->chip_select * 0x14;
cs->phys = mcspi->phys + spi->chip_select * 0x14;
cs->chconf0 = 0;
+ cs->chctrl0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
@@ -867,7 +1007,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
- if (ret < 0)
+ if (ret < 0 && ret != -EAGAIN)
return ret;
}
@@ -924,6 +1064,8 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
struct spi_device *spi;
struct spi_transfer *t = NULL;
+ struct spi_master *master;
+ struct omap2_mcspi_dma *mcspi_dma;
int cs_active = 0;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
@@ -932,23 +1074,36 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
u32 chconf;
spi = m->spi;
+ master = spi->master;
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
cs = spi->controller_state;
cd = spi->controller_data;
- omap2_mcspi_set_enable(spi, 1);
+ omap2_mcspi_set_enable(spi, 0);
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
status = -EINVAL;
break;
}
- if (par_override || t->speed_hz || t->bits_per_word) {
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
par_override = 1;
status = omap2_mcspi_setup_transfer(spi, t);
if (status < 0)
break;
- if (!t->speed_hz && !t->bits_per_word)
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
par_override = 0;
}
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
if (!cs_active) {
omap2_mcspi_force_cs(spi, 1);
@@ -975,12 +1130,19 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (t->len) {
unsigned count;
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
+ omap2_mcspi_set_fifo(spi, t, 1);
+
+ omap2_mcspi_set_enable(spi, 1);
+
/* RX_ONLY mode needs dummy data in TX reg */
if (t->tx_buf == NULL)
- __raw_writel(0, cs->base
+ writel_relaxed(0, cs->base
+ OMAP2_MCSPI_TX0);
- if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
count = omap2_mcspi_txrx_dma(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1000,6 +1162,11 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
omap2_mcspi_force_cs(spi, 0);
cs_active = 0;
}
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (mcspi->fifo_depth > 0)
+ omap2_mcspi_set_fifo(spi, t, 0);
}
/* Restore defaults if they were overriden */
if (par_override) {
@@ -1010,35 +1177,42 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
if (cs_active)
omap2_mcspi_force_cs(spi, 0);
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
omap2_mcspi_set_enable(spi, 0);
- m->status = status;
+ if (mcspi->fifo_depth > 0 && t)
+ omap2_mcspi_set_fifo(spi, t, 0);
+ m->status = status;
}
static int omap2_mcspi_transfer_one_message(struct spi_master *master,
- struct spi_message *m)
+ struct spi_message *m)
{
+ struct spi_device *spi;
struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
struct spi_transfer *t;
+ spi = m->spi;
mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
m->actual_length = 0;
m->status = 0;
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers))
- return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned len = t->len;
- if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
- || (len && !(rx_buf || tx_buf))
- || (t->bits_per_word &&
- ( t->bits_per_word < 4
- || t->bits_per_word > 32))) {
+ if ((len && !(rx_buf || tx_buf))) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
@@ -1047,17 +1221,11 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ >> 15);
- return -EINVAL;
- }
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
continue;
- if (tx_buf != NULL) {
+ if (mcspi_dma->dma_tx && tx_buf != NULL) {
t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
@@ -1066,7 +1234,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
return -EINVAL;
}
}
- if (rx_buf != NULL) {
+ if (mcspi_dma->dma_rx && rx_buf != NULL) {
t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
@@ -1085,7 +1253,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
return 0;
}
-static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
+static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1096,7 +1264,7 @@ static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
return ret;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
- OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
@@ -1138,7 +1306,7 @@ static const struct of_device_id omap_mcspi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
+static int omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
const struct omap2_mcspi_platform_config *pdata;
@@ -1149,7 +1317,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
static int bus_num = 1;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
- struct pinctrl *pinctrl;
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1159,13 +1326,19 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
- master->prepare_transfer_hardware = omap2_prepare_transfer;
- master->unprepare_transfer_hardware = omap2_unprepare_transfer;
+ master->auto_runtime_pm = true;
master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+
+ platform_set_drvdata(pdev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
match = of_match_device(omap_mcspi_of_match, &pdev->dev);
if (match) {
@@ -1175,19 +1348,17 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
master->num_chipselect = num_cs;
master->bus_num = bus_num++;
+ if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
master->num_chipselect = pdata->num_cs;
if (pdev->id != -1)
master->bus_num = pdev->id;
+ mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
- dev_set_drvdata(&pdev->dev, master);
-
- mcspi = spi_master_get_devdata(master);
- mcspi->master = master;
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
@@ -1198,10 +1369,9 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
r->end += regs_offset;
mcspi->phys = r->start;
- mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
- if (!mcspi->base) {
- dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
- status = -ENOMEM;
+ mcspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
goto free_master;
}
@@ -1209,55 +1379,65 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mcspi->ctx.cs);
- mcspi->dma_channels = kcalloc(master->num_chipselect,
- sizeof(struct omap2_mcspi_dma),
- GFP_KERNEL);
-
- if (mcspi->dma_channels == NULL)
+ mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
goto free_master;
+ }
for (i = 0; i < master->num_chipselect; i++) {
- char dma_ch_name[14];
+ char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
+ char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
struct resource *dma_res;
- sprintf(dma_ch_name, "rx%d", i);
- dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- dma_ch_name);
- if (!dma_res) {
- dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
- status = -ENODEV;
- break;
- }
+ sprintf(dma_rx_ch_name, "rx%d", i);
+ if (!pdev->dev.of_node) {
+ dma_res =
+ platform_get_resource_byname(pdev,
+ IORESOURCE_DMA,
+ dma_rx_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev,
+ "cannot get DMA RX channel\n");
+ status = -ENODEV;
+ break;
+ }
- mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
- sprintf(dma_ch_name, "tx%d", i);
- dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- dma_ch_name);
- if (!dma_res) {
- dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
- status = -ENODEV;
- break;
+ mcspi->dma_channels[i].dma_rx_sync_dev =
+ dma_res->start;
}
+ sprintf(dma_tx_ch_name, "tx%d", i);
+ if (!pdev->dev.of_node) {
+ dma_res =
+ platform_get_resource_byname(pdev,
+ IORESOURCE_DMA,
+ dma_tx_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev,
+ "cannot get DMA TX channel\n");
+ status = -ENODEV;
+ break;
+ }
- mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
+ mcspi->dma_channels[i].dma_tx_sync_dev =
+ dma_res->start;
+ }
}
if (status < 0)
- goto dma_chnl_free;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev,
- "pins are not configured from the driver\n");
+ goto free_master;
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
- if (status || omap2_mcspi_master_setup(mcspi) < 0)
+ status = omap2_mcspi_master_setup(mcspi);
+ if (status < 0)
goto disable_pm;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
goto disable_pm;
@@ -1265,29 +1445,19 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
-dma_chnl_free:
- kfree(mcspi->dma_channels);
free_master:
spi_master_put(master);
return status;
}
-static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
+static int omap2_mcspi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *dma_channels;
-
- master = dev_get_drvdata(&pdev->dev);
- mcspi = spi_master_get_devdata(master);
- dma_channels = mcspi->dma_channels;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
- kfree(dma_channels);
-
return 0;
}
@@ -1315,9 +1485,9 @@ static int omap2_mcspi_resume(struct device *dev)
* change in account.
*/
cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
}
pm_runtime_mark_last_busy(mcspi->dev);
@@ -1341,7 +1511,7 @@ static struct platform_driver omap2_mcspi_driver = {
.of_match_table = omap_mcspi_of_match,
},
.probe = omap2_mcspi_probe,
- .remove = __devexit_p(omap2_mcspi_remove),
+ .remove = omap2_mcspi_remove,
};
module_platform_driver(omap2_mcspi_driver);
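
The McSPI clock-setup hunk above replaces the single power-of-two divider with a two-mode scheme: requested rates below MAX_FREQ/MAX_DIVIDER keep the legacy CLKD power-of-two divider, while faster rates switch to one-clock-cycle granularity (CLKG), with the upper divider bits spilling into the CHCTRL0 EXTCLK field. A minimal standalone sketch of that arithmetic follows; it assumes the driver's 48 MHz reference clock and 4096 maximum power-of-two divider, and the helper and constant names here are illustrative rather than the driver's own.

/* Sketch of the divider selection shown in the omap2_mcspi_setup_transfer()
 * hunk above.  Assumes a 48 MHz reference clock and a 4096 max power-of-two
 * divider; plain user-space C, no driver code involved.
 */
#include <stdio.h>

#define MCSPI_MAX_FREQ    48000000u
#define MCSPI_MAX_DIVIDER 4096u

static unsigned calc_pow2_divisor(unsigned speed_hz)
{
	unsigned div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (MCSPI_MAX_FREQ >> div))
			return div;
	return 15;
}

int main(void)
{
	unsigned speed_hz = 10000000;	/* requested rate, example only */
	unsigned clkd, extclk = 0, clkg = 0, div;

	if (speed_hz < MCSPI_MAX_FREQ / MCSPI_MAX_DIVIDER) {
		/* legacy granularity: power-of-two divider in CHCONF.CLKD */
		clkd = calc_pow2_divisor(speed_hz);
		speed_hz = MCSPI_MAX_FREQ >> clkd;
	} else {
		/* one-cycle granularity: low 4 bits in CLKD, rest in EXTCLK */
		div = (MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
		speed_hz = MCSPI_MAX_FREQ / div;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = 1;	/* stands in for OMAP2_MCSPI_CHCONF_CLKG */
	}

	printf("clkd=%u extclk=%u clkg=%u actual=%u Hz\n",
	       clkd, extclk, clkg, speed_hz);
	return 0;
}

For a requested 10 MHz, for example, this picks div = 5 (CLKD = 4, EXTCLK = 0) and an actual rate of 9.6 MHz, matching the speed now reported in the setup debug message.
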
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b17c09cf0a0..d018a4aac3a 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -19,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/sizes.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -32,14 +32,16 @@
#define ORION_SPI_DATA_IN_REG 0x0c
#define ORION_SPI_INT_CAUSE_REG 0x10
+#define ORION_SPI_MODE_CPOL (1 << 11)
+#define ORION_SPI_MODE_CPHA (1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
+ ORION_SPI_MODE_CPHA)
struct orion_spi {
struct spi_master *master;
void __iomem *base;
- unsigned int max_speed;
- unsigned int min_speed;
struct clk *clk;
};
@@ -70,23 +72,6 @@ orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
writel(val, reg_addr);
}
-static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
-{
- if (size == 16) {
- orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else if (size == 8) {
- orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are "
- "allowed).\n", size);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
u32 tclk_hz;
@@ -123,6 +108,23 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
return 0;
}
+static void
+orion_spi_mode_set(struct spi_device *spi)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg &= ~ORION_SPI_MODE_MASK;
+ if (spi->mode & SPI_CPOL)
+ reg |= ORION_SPI_MODE_CPOL;
+ if (spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_MODE_CPHA;
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+}
+
/*
* called only when no transfer is active on the bus
*/
@@ -142,11 +144,20 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if ((t != NULL) && t->bits_per_word)
bits_per_word = t->bits_per_word;
+ orion_spi_mode_set(spi);
+
rc = orion_spi_baudrate_set(spi, speed);
if (rc)
return rc;
- return orion_spi_set_transfer_size(orion_spi, bits_per_word);
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
}
static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
@@ -236,11 +247,9 @@ orion_spi_write_read_16bit(struct spi_device *spi,
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct orion_spi *orion_spi;
unsigned int count;
int word_len;
- orion_spi = spi_master_get_devdata(spi->master);
word_len = spi->bits_per_word;
count = xfer->len;
@@ -286,27 +295,6 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
list_for_each_entry(t, &m->transfers, transfer_list) {
- /* make sure buffer length is even when working in 16
- * bit mode*/
- if ((t->bits_per_word == 16) && (t->len & 1)) {
- dev_err(&spi->dev,
- "message rejected : "
- "odd data length %d while in 16 bit mode\n",
- t->len);
- status = -EIO;
- goto msg_done;
- }
-
- if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev,
- "message rejected : "
- "device min speed (%d Hz) exceeds "
- "required transfer speed (%d Hz)\n",
- orion_spi->min_speed, t->speed_hz);
- status = -EIO;
- goto msg_done;
- }
-
if (par_override || t->speed_hz || t->bits_per_word) {
par_override = 1;
status = orion_spi_setup_transfer(spi, t);
@@ -343,7 +331,7 @@ msg_done:
return 0;
}
-static int __init orion_spi_reset(struct orion_spi *orion_spi)
+static int orion_spi_reset(struct orion_spi *orion_spi)
{
/* Verify that the CS is deasserted */
orion_spi_set_cs(orion_spi, 0);
@@ -351,29 +339,7 @@ static int __init orion_spi_reset(struct orion_spi *orion_spi)
return 0;
}
-static int orion_spi_setup(struct spi_device *spi)
-{
- struct orion_spi *orion_spi;
-
- orion_spi = spi_master_get_devdata(spi->master);
-
- if ((spi->max_speed_hz == 0)
- || (spi->max_speed_hz > orion_spi->max_speed))
- spi->max_speed_hz = orion_spi->max_speed;
-
- if (spi->max_speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
- spi->max_speed_hz);
- return -EINVAL;
- }
-
- /*
- * baudrate & width will be set orion_spi_setup_transfer
- */
- return 0;
-}
-
-static int __init orion_spi_probe(struct platform_device *pdev)
+static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct orion_spi *spi;
@@ -383,7 +349,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
const u32 *iprop;
int size;
- master = spi_alloc_master(&pdev->dev, sizeof *spi);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
@@ -399,18 +365,18 @@ static int __init orion_spi_probe(struct platform_device *pdev)
}
/* we support only mode 0, and no options */
- master->mode_bits = 0;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
- master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- dev_set_drvdata(&pdev->dev, master);
+ platform_set_drvdata(pdev, master);
spi = spi_master_get_devdata(master);
spi->master = master;
- spi->clk = clk_get(&pdev->dev, NULL);
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
status = PTR_ERR(spi->clk);
goto out;
@@ -419,66 +385,50 @@ static int __init orion_spi_probe(struct platform_device *pdev)
clk_prepare(spi->clk);
clk_enable(spi->clk);
tclk_hz = clk_get_rate(spi->clk);
- spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
- spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
+ master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- status = -ENODEV;
- goto out_rel_clk;
- }
-
- if (!request_mem_region(r->start, resource_size(r),
- dev_name(&pdev->dev))) {
- status = -EBUSY;
+ spi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(spi->base)) {
+ status = PTR_ERR(spi->base);
goto out_rel_clk;
}
- spi->base = ioremap(r->start, SZ_1K);
if (orion_spi_reset(spi) < 0)
- goto out_rel_mem;
+ goto out_rel_clk;
master->dev.of_node = pdev->dev.of_node;
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
- goto out_rel_mem;
+ goto out_rel_clk;
return status;
-out_rel_mem:
- release_mem_region(r->start, resource_size(r));
out_rel_clk:
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
out:
spi_master_put(master);
return status;
}
-static int __exit orion_spi_remove(struct platform_device *pdev)
+static int orion_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
- struct resource *r;
struct orion_spi *spi;
- master = dev_get_drvdata(&pdev->dev);
+ master = platform_get_drvdata(pdev);
spi = spi_master_get_devdata(master);
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
- spi_unregister_master(master);
return 0;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
-static const struct of_device_id orion_spi_of_match_table[] __devinitdata = {
+static const struct of_device_id orion_spi_of_match_table[] = {
{ .compatible = "marvell,orion-spi", },
{}
};
@@ -490,20 +440,11 @@ static struct platform_driver orion_spi_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(orion_spi_of_match_table),
},
- .remove = __exit_p(orion_spi_remove),
+ .probe = orion_spi_probe,
+ .remove = orion_spi_remove,
};
-static int __init orion_spi_init(void)
-{
- return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
-}
-module_init(orion_spi_init);
-
-static void __exit orion_spi_exit(void)
-{
- platform_driver_unregister(&orion_spi_driver);
-}
-module_exit(orion_spi_exit);
+module_platform_driver(orion_spi_driver);
MODULE_DESCRIPTION("Orion SPI driver");
MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 919464102d3..66d2ae21e78 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -368,10 +368,6 @@ struct pl022 {
resource_size_t phybase;
void __iomem *virtbase;
struct clk *clk;
- /* Two optional pin states - default & sleep */
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
- struct pinctrl_state *pins_sleep;
struct spi_master *master;
struct pl022_ssp_controller *master_info;
/* Message per-transfer pump */
@@ -463,9 +459,8 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
pl022->next_msg_cs_active = false;
- last_transfer = list_entry(pl022->cur_msg->transfers.prev,
- struct spi_transfer,
- transfer_list);
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
if (last_transfer->delay_usecs)
@@ -1088,7 +1083,7 @@ err_alloc_rx_sg:
return -ENOMEM;
}
-static int __devinit pl022_dma_probe(struct pl022 *pl022)
+static int pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
@@ -1116,10 +1111,8 @@ static int __devinit pl022_dma_probe(struct pl022 *pl022)
}
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pl022->dummypage) {
- dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
+ if (!pl022->dummypage)
goto err_no_dummypage;
- }
dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
dma_chan_name(pl022->dma_rx_channel),
@@ -1138,6 +1131,35 @@ err_no_rxchan:
return -ENODEV;
}
+static int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ struct device *dev = &pl022->adev->dev;
+
+ /* automatically configure DMA channels from platform, normally using DT */
+ pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+ if (!pl022->dma_rx_channel)
+ goto err_no_rxchan;
+
+ pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+ if (!pl022->dma_tx_channel)
+ goto err_no_txchan;
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage)
+ goto err_no_dummypage;
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+ pl022->dma_tx_channel = NULL;
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ return -ENODEV;
+}
+
static void terminate_dma(struct pl022 *pl022)
{
struct dma_chan *rxchan = pl022->dma_rx_channel;
@@ -1166,6 +1188,11 @@ static inline int configure_dma(struct pl022 *pl022)
return -ENODEV;
}
+static inline int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ return 0;
+}
+
static inline int pl022_dma_probe(struct pl022 *pl022)
{
return 0;
@@ -1525,18 +1552,6 @@ static int pl022_transfer_one_message(struct spi_master *master,
return 0;
}
-static int pl022_prepare_transfer_hardware(struct spi_master *master)
-{
- struct pl022 *pl022 = spi_master_get_devdata(master);
-
- /*
- * Just make sure we have all we need to run the transfer by syncing
- * with the runtime PM framework.
- */
- pm_runtime_get_sync(&pl022->adev->dev);
- return 0;
-}
-
static int pl022_unprepare_transfer_hardware(struct spi_master *master)
{
struct pl022 *pl022 = spi_master_get_devdata(master);
@@ -1545,13 +1560,6 @@ static int pl022_unprepare_transfer_hardware(struct spi_master *master)
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- if (pl022->master_info->autosuspend_delay > 0) {
- pm_runtime_mark_last_busy(&pl022->adev->dev);
- pm_runtime_put_autosuspend(&pl022->adev->dev);
- } else {
- pm_runtime_put(&pl022->adev->dev);
- }
-
return 0;
}
@@ -1608,7 +1616,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
switch (chip_info->tx_lev_trig) {
case SSP_TX_1_OR_MORE_EMPTY_LOC:
@@ -1634,7 +1641,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
- break;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -1801,11 +1807,8 @@ static int pl022_setup(struct spi_device *spi)
if (chip == NULL) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
- if (!chip) {
- dev_err(&spi->dev,
- "cannot allocate controller state\n");
+ if (!chip)
return -ENOMEM;
- }
dev_dbg(&spi->dev,
"allocated memory for controller's runtime state\n");
}
@@ -2042,12 +2045,11 @@ pl022_platform_data_dt_get(struct device *dev)
}
pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "cannot allocate platform data memory\n");
+ if (!pd)
return NULL;
- }
pd->bus_id = -1;
+ pd->enable_dma = 1;
of_property_read_u32(np, "num-cs", &tmp);
pd->num_chipselect = tmp;
of_property_read_u32(np, "pl022,autosuspend-delay",
@@ -2057,11 +2059,11 @@ pl022_platform_data_dt_get(struct device *dev)
return pd;
}
-static int __devinit
-pl022_probe(struct amba_device *adev, const struct amba_id *id)
+static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
- struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
+ struct pl022_ssp_controller *platform_info =
+ dev_get_platdata(&adev->dev);
struct spi_master *master;
struct pl022 *pl022 = NULL; /*Data for this driver */
struct device_node *np = adev->dev.of_node;
@@ -2099,28 +2101,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
GFP_KERNEL);
- pl022->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(pl022->pinctrl)) {
- status = PTR_ERR(pl022->pinctrl);
- goto err_no_pinctrl;
- }
-
- pl022->pins_default = pinctrl_lookup_state(pl022->pinctrl,
- PINCTRL_STATE_DEFAULT);
- /* enable pins to be muxed in and configured */
- if (!IS_ERR(pl022->pins_default)) {
- status = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_default);
- if (status)
- dev_err(dev, "could not set default pins\n");
- } else
- dev_err(dev, "could not get default pinstate\n");
-
- pl022->pins_sleep = pinctrl_lookup_state(pl022->pinctrl,
- PINCTRL_STATE_SLEEP);
- if (IS_ERR(pl022->pins_sleep))
- dev_dbg(dev, "could not get sleep pinstate\n");
-
/*
* Bus Number Which has been Assigned to this SSP controller
* on this board
@@ -2129,7 +2109,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
master->num_chipselect = num_cs;
master->cleanup = pl022_cleanup;
master->setup = pl022_setup;
- master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
+ master->auto_runtime_pm = true;
master->transfer_one_message = pl022_transfer_one_message;
master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
master->rt = platform_info->rt;
@@ -2183,10 +2163,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
status = -ENOMEM;
goto err_no_ioremap;
}
- printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
- adev->res.start, pl022->virtbase);
-
- pm_runtime_resume(dev);
+ dev_info(&adev->dev, "mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2195,13 +2173,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- status = clk_prepare(pl022->clk);
- if (status) {
- dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
- goto err_clk_prep;
- }
-
- status = clk_enable(pl022->clk);
+ status = clk_prepare_enable(pl022->clk);
if (status) {
dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
goto err_no_clk_en;
@@ -2223,8 +2195,13 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_irq;
}
- /* Get DMA channels */
- if (platform_info->enable_dma) {
+ /* Get DMA channels, try autoconfiguration first */
+ status = pl022_dma_autoprobe(pl022);
+
+ /* If that failed, use channels from platform_info */
+ if (status == 0)
+ platform_info->enable_dma = 1;
+ else if (platform_info->enable_dma) {
status = pl022_dma_probe(pl022);
if (status != 0)
platform_info->enable_dma = 0;
@@ -2232,7 +2209,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&adev->dev, master);
if (status != 0) {
dev_err(&adev->dev,
"probe - problem registering spi master\n");
@@ -2248,31 +2225,27 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_set_autosuspend_delay(dev,
platform_info->autosuspend_delay);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put_autosuspend(dev);
- } else {
- pm_runtime_put(dev);
}
+ pm_runtime_put(dev);
+
return 0;
err_spi_register:
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
err_no_irq:
- clk_disable(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
err_no_clk_en:
- clk_unprepare(pl022->clk);
- err_clk_prep:
err_no_clk:
err_no_ioremap:
amba_release_regions(adev);
err_no_ioregion:
err_no_gpio:
- err_no_pinctrl:
spi_master_put(master);
return status;
}
-static int __devexit
+static int
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
@@ -2290,56 +2263,13 @@ pl022_remove(struct amba_device *adev)
if (pl022->master_info->enable_dma)
pl022_dma_remove(pl022);
- clk_disable(pl022->clk);
- clk_unprepare(pl022->clk);
- pm_runtime_disable(&adev->dev);
+ clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
- spi_unregister_master(pl022->master);
- amba_set_drvdata(adev, NULL);
return 0;
}
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
-/*
- * These two functions are used from both suspend/resume and
- * the runtime counterparts to handle external resources like
- * clocks, pins and regulators when going to sleep.
- */
-static void pl022_suspend_resources(struct pl022 *pl022)
-{
- int ret;
-
- clk_disable(pl022->clk);
-
- /* Optionally let pins go into sleep states */
- if (!IS_ERR(pl022->pins_sleep)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_sleep);
- if (ret)
- dev_err(&pl022->adev->dev,
- "could not set pins to sleep state\n");
- }
-}
-
-static void pl022_resume_resources(struct pl022 *pl022)
-{
- int ret;
-
- /* Optionaly enable pins to be muxed in and configured */
- if (!IS_ERR(pl022->pins_default)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_default);
- if (ret)
- dev_err(&pl022->adev->dev,
- "could not set default pins\n");
- }
-
- clk_enable(pl022->clk);
-}
-#endif
-
-#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
@@ -2350,7 +2280,14 @@ static int pl022_suspend(struct device *dev)
dev_warn(dev, "cannot suspend master\n");
return ret;
}
- pl022_suspend_resources(pl022);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2361,7 +2298,9 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022);
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2372,14 +2311,16 @@ static int pl022_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022);
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
return 0;
}
@@ -2387,14 +2328,16 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022);
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
return 0;
}
#endif
static const struct dev_pm_ops pl022_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
- SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+ SET_PM_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
static struct vendor_data vendor_arm = {
@@ -2467,7 +2410,7 @@ static struct amba_driver pl022_driver = {
},
.id_table = pl022_ids,
.probe = pl022_probe,
- .remove = __devexit_p(pl022_remove),
+ .remove = pl022_remove,
};
static int __init pl022_init(void)
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 7a85f22b647..80b8408ac3e 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -24,11 +24,12 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/interrupt.h>
@@ -190,12 +191,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
speed = min(t->speed_hz, spi->max_speed_hz);
}
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n",
- bits_per_word);
- return -EINVAL;
- }
-
if (!speed || (speed > spi->max_speed_hz)) {
dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed);
return -EINVAL;
@@ -229,12 +224,6 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
{
struct spi_ppc4xx_cs *cs = spi->controller_state;
- if (spi->bits_per_word != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n",
- spi->bits_per_word);
- return -EINVAL;
- }
-
if (!spi->max_speed_hz) {
dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n");
return -EINVAL;
@@ -389,7 +378,7 @@ static void free_gpios(struct ppc4xx_spi *hw)
/*
* platform_device layer stuff...
*/
-static int __init spi_ppc4xx_of_probe(struct platform_device *op)
+static int spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
struct spi_master *master;
@@ -406,9 +395,9 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
if (master == NULL)
return -ENOMEM;
master->dev.of_node = np;
- dev_set_drvdata(dev, master);
+ platform_set_drvdata(op, master);
hw = spi_master_get_devdata(master);
- hw->master = spi_master_get(master);
+ hw->master = master;
hw->dev = dev;
init_completion(&hw->done);
@@ -419,7 +408,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
* This includes both "null" gpio's and real ones.
*/
num_gpios = of_gpio_count(np);
- if (num_gpios) {
+ if (num_gpios > 0) {
int i;
hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
@@ -465,13 +454,14 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
bbp->use_dma = 0;
bbp->master->setup = spi_ppc4xx_setup;
bbp->master->cleanup = spi_ppc4xx_cleanup;
+ bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
bbp->master->mode_bits =
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
/* this many pins in all GPIO controllers */
- bbp->master->num_chipselect = num_gpios;
+ bbp->master->num_chipselect = num_gpios > 0 ? num_gpios : 0;
/* Get the clock for the OPB */
opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
@@ -553,24 +543,23 @@ request_mem_error:
free_gpios:
free_gpios(hw);
free_master:
- dev_set_drvdata(dev, NULL);
spi_master_put(master);
dev_err(dev, "initialization failed\n");
return ret;
}
-static int __exit spi_ppc4xx_of_remove(struct platform_device *op)
+static int spi_ppc4xx_of_remove(struct platform_device *op)
{
- struct spi_master *master = dev_get_drvdata(&op->dev);
+ struct spi_master *master = platform_get_drvdata(op);
struct ppc4xx_spi *hw = spi_master_get_devdata(master);
spi_bitbang_stop(&hw->bitbang);
- dev_set_drvdata(&op->dev, NULL);
release_mem_region(hw->mapbase, hw->mapsize);
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
free_gpios(hw);
+ spi_master_put(master);
return 0;
}
@@ -583,7 +572,7 @@ MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
static struct platform_driver spi_ppc4xx_of_driver = {
.probe = spi_ppc4xx_of_probe,
- .remove = __exit_p(spi_ppc4xx_of_remove),
+ .remove = spi_ppc4xx_of_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
new file mode 100644
index 00000000000..c41ff148a2b
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -0,0 +1,376 @@
+/*
+ * PXA2xx SPI DMA engine support.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include "spi-pxa2xx.h"
+
+static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ int i, nents, len = drv_data->len;
+ struct scatterlist *sg;
+ struct device *dmadev;
+ struct sg_table *sgt;
+ void *buf, *pbuf;
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ buf = drv_data->tx;
+ drv_data->tx_map_len = len;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ buf = drv_data->rx;
+ drv_data->rx_map_len = len;
+ }
+
+ nents = DIV_ROUND_UP(len, SZ_2K);
+ if (nents != sgt->nents) {
+ int ret;
+
+ sg_free_table(sgt);
+ ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, SZ_2K);
+
+ if (buf)
+ sg_set_buf(sg, pbuf, bytes);
+ else
+ sg_set_buf(sg, drv_data->dummy, bytes);
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return -ENOMEM;
+
+ return nents;
+}
+
+static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ struct device *dmadev;
+ struct sg_table *sgt;
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ }
+
+ dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ if (!drv_data->dma_mapped)
+ return;
+
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+
+ drv_data->dma_mapped = 0;
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
+ bool error)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /*
+ * It is possible that one CPU is handling ROR interrupt and other
+ * just gets DMA completion. Calling pump_transfers() twice for the
+ * same transfer leads to problems thus we prevent concurrent calls
+ * by using ->dma_running.
+ */
+ if (atomic_dec_and_test(&drv_data->dma_running)) {
+ void __iomem *reg = drv_data->ioaddr;
+
+ /*
+ * If the other CPU is still handling the ROR interrupt we
+ * might not know about the error yet. So we re-check the
+ * ROR bit here before we clear the status register.
+ */
+ if (!error) {
+ u32 status = read_SSSR(reg) & drv_data->mask_sr;
+ error = status & SSSR_ROR;
+ }
+
+ /* Clear status & disable interrupts */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+
+ if (!error) {
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ drv_data->tx += drv_data->tx_map_len;
+ drv_data->rx += drv_data->rx_map_len;
+
+ msg->actual_length += drv_data->len;
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+ } else {
+ /* In case we got an error we disable the SSP now */
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+
+ msg->state = ERROR_STATE;
+ }
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+}
+
+static void pxa2xx_spi_dma_callback(void *data)
+{
+ pxa2xx_spi_dma_transfer_complete(data, false);
+}
+
+static struct dma_async_tx_descriptor *
+pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
+ enum dma_transfer_direction dir)
+{
+ struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct chip_data *chip = drv_data->cur_chip;
+ enum dma_slave_buswidth width;
+ struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+ int nents, ret;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 2:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = drv_data->ssdr_physical;
+ cfg.dst_addr_width = width;
+ cfg.dst_maxburst = chip->dma_burst_size;
+ cfg.slave_id = pdata->tx_slave_id;
+
+ sgt = &drv_data->tx_sgt;
+ nents = drv_data->tx_nents;
+ chan = drv_data->tx_chan;
+ } else {
+ cfg.src_addr = drv_data->ssdr_physical;
+ cfg.src_addr_width = width;
+ cfg.src_maxburst = chip->dma_burst_size;
+ cfg.slave_id = pdata->rx_slave_id;
+
+ sgt = &drv_data->rx_sgt;
+ nents = drv_data->rx_nents;
+ chan = drv_data->rx_chan;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+ return NULL;
+ }
+
+ return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
+static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
+{
+ const struct pxa2xx_spi_master *pdata = param;
+
+ return chan->chan_id == pdata->tx_chan_id ||
+ chan->chan_id == pdata->rx_chan_id;
+}
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ return len <= MAX_DMA_LEN;
+}
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ const struct chip_data *chip = drv_data->cur_chip;
+ int ret;
+
+ if (!chip->enable_dma)
+ return 0;
+
+ /* Don't bother with DMA if we can't do even a single burst */
+ if (drv_data->len < chip->dma_burst_size)
+ return 0;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
+ if (ret <= 0) {
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
+ return 0;
+ }
+
+ drv_data->tx_nents = ret;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ if (ret <= 0) {
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
+ return 0;
+ }
+
+ drv_data->rx_nents = ret;
+ return 1;
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 status;
+
+ status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+ if (status & SSSR_ROR) {
+ dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dmaengine_terminate_all(drv_data->tx_chan);
+
+ pxa2xx_spi_dma_transfer_complete(drv_data, true);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+
+ tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
+ if (!tx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA TX descriptor\n");
+ return -EBUSY;
+ }
+
+ rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
+ if (!rx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA RX descriptor\n");
+ return -EBUSY;
+ }
+
+ /* We are ready when RX completes */
+ rx_desc->callback = pxa2xx_spi_dma_callback;
+ rx_desc->callback_param = drv_data;
+
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ dma_async_issue_pending(drv_data->rx_chan);
+ dma_async_issue_pending(drv_data->tx_chan);
+
+ atomic_set(&drv_data->dma_running, 1);
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct device *dev = &drv_data->pdev->dev;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
+ if (!drv_data->dummy)
+ return -ENOMEM;
+
+ drv_data->tx_chan = dma_request_slave_channel_compat(mask,
+ pxa2xx_spi_dma_filter, pdata, dev, "tx");
+ if (!drv_data->tx_chan)
+ return -ENODEV;
+
+ drv_data->rx_chan = dma_request_slave_channel_compat(mask,
+ pxa2xx_spi_dma_filter, pdata, dev, "rx");
+ if (!drv_data->rx_chan) {
+ dma_release_channel(drv_data->tx_chan);
+ drv_data->tx_chan = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ if (drv_data->rx_chan) {
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dma_release_channel(drv_data->rx_chan);
+ sg_free_table(&drv_data->rx_sgt);
+ drv_data->rx_chan = NULL;
+ }
+ if (drv_data->tx_chan) {
+ dmaengine_terminate_all(drv_data->tx_chan);
+ dma_release_channel(drv_data->tx_chan);
+ sg_free_table(&drv_data->tx_sgt);
+ drv_data->tx_chan = NULL;
+ }
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+
+ /*
+ * If the DMA burst size is given in chip_info we use that,
+ * otherwise we use the default. Also we use the default FIFO
+ * thresholds for now.
+ */
+ *burst_code = chip_info ? chip_info->dma_burst_size : 1;
+ *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
+ | SSCR1_TxTresh(TX_THRESH_DFLT);
+
+ return 0;
+}
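
The new pxa2xx DMA engine support above maps each transfer through a scatter-gather table whose entries are capped at 2 KiB (nents = DIV_ROUND_UP(len, SZ_2K)), pointing missing buffers at a shared dummy page. The chunking arithmetic on its own, as a small sketch with an illustrative transfer length and no DMA or scatterlist API involved:

/* Sketch of the 2 KiB scatter-gather chunking used by
 * pxa2xx_spi_map_dma_buffer() above: 'len' bytes become
 * DIV_ROUND_UP(len, 2048) entries of at most 2 KiB each.
 */
#include <stdio.h>

#define CHUNK 2048u

int main(void)
{
	unsigned len = 5000;	/* example transfer length in bytes */
	unsigned nents = (len + CHUNK - 1) / CHUNK;
	unsigned i, off = 0;

	for (i = 0; i < nents; i++) {
		unsigned bytes = (len - off) < CHUNK ? (len - off) : CHUNK;

		printf("sg[%u]: offset %u, %u bytes\n", i, off, bytes);
		off += bytes;
	}
	return 0;
}

A 5000-byte transfer, for instance, becomes three entries of 2048, 2048 and 904 bytes, which the driver then hands to dma_map_sg() and dmaengine_prep_slave_sg().
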
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 9f6ba34b172..c1865c92ccb 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -8,164 +8,121 @@
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
-struct ce4100_info {
- struct ssp_device ssp;
- struct platform_device *spi_pdev;
+enum {
+ PORT_CE4100,
+ PORT_BYT,
};
-static DEFINE_MUTEX(ssp_lock);
-static LIST_HEAD(ssp_list);
-
-struct ssp_device *pxa_ssp_request(int port, const char *label)
-{
- struct ssp_device *ssp = NULL;
-
- mutex_lock(&ssp_lock);
-
- list_for_each_entry(ssp, &ssp_list, node) {
- if (ssp->port_id == port && ssp->use_count == 0) {
- ssp->use_count++;
- ssp->label = label;
- break;
- }
- }
-
- mutex_unlock(&ssp_lock);
-
- if (&ssp->node == &ssp_list)
- return NULL;
-
- return ssp;
-}
-EXPORT_SYMBOL_GPL(pxa_ssp_request);
+struct pxa_spi_info {
+ enum pxa_ssp_type type;
+ int port_id;
+ int num_chipselect;
+ int tx_slave_id;
+ int tx_chan_id;
+ int rx_slave_id;
+ int rx_chan_id;
+};
-void pxa_ssp_free(struct ssp_device *ssp)
-{
- mutex_lock(&ssp_lock);
- if (ssp->use_count) {
- ssp->use_count--;
- ssp->label = NULL;
- } else
- dev_err(&ssp->pdev->dev, "device already free\n");
- mutex_unlock(&ssp_lock);
-}
-EXPORT_SYMBOL_GPL(pxa_ssp_free);
+static struct pxa_spi_info spi_info_configs[] = {
+ [PORT_CE4100] = {
+ .type = PXA25x_SSP,
+ .port_id = -1,
+ .num_chipselect = -1,
+ .tx_slave_id = -1,
+ .tx_chan_id = -1,
+ .rx_slave_id = -1,
+ .rx_chan_id = -1,
+ },
+ [PORT_BYT] = {
+ .type = LPSS_SSP,
+ .port_id = 0,
+ .num_chipselect = 1,
+ .tx_slave_id = 0,
+ .tx_chan_id = 0,
+ .rx_slave_id = 1,
+ .rx_chan_id = 1,
+ },
+};
-static int __devinit ce4100_spi_probe(struct pci_dev *dev,
+static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
+ struct platform_device_info pi;
int ret;
- resource_size_t phys_beg;
- resource_size_t phys_len;
- struct ce4100_info *spi_info;
struct platform_device *pdev;
struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
+ struct pxa_spi_info *c;
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret)
return ret;
- phys_beg = pci_resource_start(dev, 0);
- phys_len = pci_resource_len(dev, 0);
-
- if (!request_mem_region(phys_beg, phys_len,
- "CE4100 SPI")) {
- dev_err(&dev->dev, "Can't request register space.\n");
- ret = -EBUSY;
+ ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
+ if (ret)
return ret;
- }
- pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
- spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
- if (!pdev || !spi_info ) {
- ret = -ENOMEM;
- goto err_nomem;
- }
- memset(&spi_pdata, 0, sizeof(spi_pdata));
- spi_pdata.num_chipselect = dev->devfn;
+ c = &spi_info_configs[ent->driver_data];
- ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
- if (ret)
- goto err_nomem;
-
- pdev->dev.parent = &dev->dev;
- pdev->dev.of_node = dev->dev.of_node;
- ssp = &spi_info->ssp;
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = (c->num_chipselect > 0) ?
+ c->num_chipselect : dev->devfn;
+ spi_pdata.tx_slave_id = c->tx_slave_id;
+ spi_pdata.tx_chan_id = c->tx_chan_id;
+ spi_pdata.rx_slave_id = c->rx_slave_id;
+ spi_pdata.rx_chan_id = c->rx_chan_id;
+ spi_pdata.enable_dma = c->rx_slave_id >= 0 && c->tx_slave_id >= 0;
+
+ ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
- ssp->mmio_base = ioremap(phys_beg, phys_len);
+ ssp->mmio_base = pcim_iomap_table(dev)[0];
if (!ssp->mmio_base) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -EIO;
- goto err_nomem;
+ dev_err(&dev->dev, "failed to ioremap() registers\n");
+ return -EIO;
}
ssp->irq = dev->irq;
- ssp->port_id = pdev->id;
- ssp->type = PXA25x_SSP;
+ ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
+ ssp->type = c->type;
- mutex_lock(&ssp_lock);
- list_add(&ssp->node, &ssp_list);
- mutex_unlock(&ssp_lock);
+ memset(&pi, 0, sizeof(pi));
+ pi.parent = &dev->dev;
+ pi.name = "pxa2xx-spi";
+ pi.id = ssp->port_id;
+ pi.data = &spi_pdata;
+ pi.size_data = sizeof(spi_pdata);
- pci_set_drvdata(dev, spi_info);
+ pdev = platform_device_register_full(&pi);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- ret = platform_device_add(pdev);
- if (ret)
- goto err_dev_add;
-
- return ret;
-
-err_dev_add:
- pci_set_drvdata(dev, NULL);
- mutex_lock(&ssp_lock);
- list_del(&ssp->node);
- mutex_unlock(&ssp_lock);
- iounmap(ssp->mmio_base);
-
-err_nomem:
- release_mem_region(phys_beg, phys_len);
- platform_device_put(pdev);
- kfree(spi_info);
- return ret;
+ pci_set_drvdata(dev, pdev);
+
+ return 0;
}
-static void __devexit ce4100_spi_remove(struct pci_dev *dev)
+static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
- struct ce4100_info *spi_info;
- struct ssp_device *ssp;
-
- spi_info = pci_get_drvdata(dev);
- ssp = &spi_info->ssp;
- platform_device_unregister(spi_info->spi_pdev);
-
- iounmap(ssp->mmio_base);
- release_mem_region(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
-
- mutex_lock(&ssp_lock);
- list_del(&ssp->node);
- mutex_unlock(&ssp_lock);
+ struct platform_device *pdev = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
- pci_disable_device(dev);
- kfree(spi_info);
+ platform_device_unregister(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
+static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
{ },
};
-MODULE_DEVICE_TABLE(pci, ce4100_spi_devices);
+MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
-static struct pci_driver ce4100_spi_driver = {
- .name = "ce4100_spi",
- .id_table = ce4100_spi_devices,
- .probe = ce4100_spi_probe,
- .remove = __devexit_p(ce4100_spi_remove),
+static struct pci_driver pxa2xx_spi_pci_driver = {
+ .name = "pxa2xx_spi_pci",
+ .id_table = pxa2xx_spi_pci_devices,
+ .probe = pxa2xx_spi_pci_probe,
+ .remove = pxa2xx_spi_pci_remove,
};
-module_pci_driver(ce4100_spi_driver);
+module_pci_driver(pxa2xx_spi_pci_driver);
-MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
+MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
new file mode 100644
index 00000000000..e8a26f25d5c
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -0,0 +1,489 @@
+/*
+ * PXA2xx SPI private DMA support.
+ *
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include "spi-pxa2xx.h"
+
+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ /* Try to map the dma buffer and do a dma transfer if successful, but
+ * only if the length is non-zero and no longer than MAX_DMA_LEN.
+ *
+ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
+ * of PIO instead. Care is needed because the transfer may have
+ * been passed with buffers that are already dma mapped.
+ * A zero-length transfer in PIO mode will not try to write/read
+ * to/from the buffers.
+ *
+ * REVISIT large transfers are exactly where we most want to be
+ * using DMA. If this happens much, split those transfers into
+ * multiple DMA segments rather than forcing PIO.
+ */
+ return len > 0 && len <= MAX_DMA_LEN;
+}
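/*
 * Editor's illustrative sketch (not part of the patch): the caller-side
 * pattern this helper supports. pump_transfers() in spi-pxa2xx.c chooses
 * between DMA and PIO essentially like this (all symbols are the driver's
 * own; nothing new is introduced here):
 *
 *	drv_data->dma_mapped = 0;
 *	if (pxa2xx_spi_dma_is_possible(drv_data->len))
 *		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
 *
 *	if (drv_data->dma_mapped)
 *		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
 *	else
 *		drv_data->transfer_handler = interrupt_transfer;
 */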
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct device *dev = &msg->spi->dev;
+
+ if (!drv_data->cur_chip->enable_dma)
+ return 0;
+
+ if (msg->is_dma_mapped)
+ return drv_data->rx_dma && drv_data->tx_dma;
+
+ if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
+ return 0;
+
+ /* Modify setup if rx buffer is null */
+ if (drv_data->rx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->rx = drv_data->null_dma_buf;
+ drv_data->rx_map_len = 4;
+ } else
+ drv_data->rx_map_len = drv_data->len;
+
+
+ /* Modify setup if tx buffer is null */
+ if (drv_data->tx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->tx = drv_data->null_dma_buf;
+ drv_data->tx_map_len = 4;
+ } else
+ drv_data->tx_map_len = drv_data->len;
+
+ /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
+ * so we flush the cache *before* invalidating it, in case
+ * the tx and rx buffers overlap.
+ */
+ drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, drv_data->tx_dma))
+ return 0;
+
+ /* Stream map the rx buffer */
+ drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, drv_data->rx_dma)) {
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ struct device *dev;
+
+ if (!drv_data->dma_mapped)
+ return;
+
+ if (!drv_data->cur_msg->is_dma_mapped) {
+ dev = &drv_data->cur_msg->spi->dev;
+ dma_unmap_single(dev, drv_data->rx_dma,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ }
+
+ drv_data->dma_mapped = 0;
+}
+
+static int wait_ssp_rx_stall(void const __iomem *ioaddr)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static int wait_dma_channel_stop(int channel)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
+ const char *msg)
+{
+ void __iomem *reg = drv_data->ioaddr;
+
+ /* Stop and reset */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+ pxa2xx_spi_flush(drv_data);
+ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ dev_err(&drv_data->pdev->dev, "%s\n", msg);
+
+ drv_data->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
+{
+ void __iomem *reg = drv_data->ioaddr;
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /* Clear and disable interrupts on SSP and DMA channels */
+ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+ if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: dma rx channel stop failed\n");
+
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: ssp rx stall failed\n");
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ /* update the buffer pointer for the amount completed in dma */
+ drv_data->rx += drv_data->len -
+ (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
+
+ /* Read trailing data from the fifo; it does not matter how many
+ * bytes are in the fifo, just read until the buffer is full or the
+ * fifo is empty, whichever occurs first */
+ drv_data->read(drv_data);
+
+ /* return count of what was actually read */
+ msg->actual_length += drv_data->len -
+ (drv_data->rx_end - drv_data->rx);
+
+ /* Transfer delays and chip select release are
+ * handled in pump_transfers or giveback
+ */
+
+ /* Move to next transfer */
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+void pxa2xx_spi_dma_handler(int channel, void *data)
+{
+ struct driver_data *drv_data = data;
+ u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+
+ if (irq_status & DCSR_BUSERR) {
+
+ if (channel == drv_data->tx_channel)
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on tx channel");
+ else
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on rx channel");
+ return;
+ }
+
+ /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
+ if ((channel == drv_data->tx_channel)
+ && (irq_status & DCSR_ENDINTR)
+ && (drv_data->ssp_type == PXA25x_SSP)) {
+
+ /* Wait for rx to stall */
+ if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: ssp rx stall failed\n");
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+ }
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 irq_status;
+ void __iomem *reg = drv_data->ioaddr;
+
+ irq_status = read_SSSR(reg) & drv_data->mask_sr;
+ if (irq_status & SSSR_ROR) {
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ /* Check for false positive timeout */
+ if ((irq_status & SSSR_TINT)
+ && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
+ write_SSSR(SSSR_TINT, reg);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
+
+ /* Clear and disable the timeout interrupt; do the rest in
+ * dma_transfer_complete */
+ if (!pxa25x_ssp_comp(drv_data))
+ write_SSTO(0, reg);
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Oops, problem detected */
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ u32 dma_width;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ dma_width = DCMD_WIDTH1;
+ break;
+ case 2:
+ dma_width = DCMD_WIDTH2;
+ break;
+ default:
+ dma_width = DCMD_WIDTH4;
+ break;
+ }
+
+ /* Setup rx DMA Channel */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
+ DTADR(drv_data->rx_channel) = drv_data->rx_dma;
+ if (drv_data->rx == drv_data->null_dma_buf)
+ /* No target address increment */
+ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
+ | DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Setup tx DMA Channel */
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->tx_channel) = drv_data->tx_dma;
+ DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
+ if (drv_data->tx == drv_data->null_dma_buf)
+ /* No source address increment */
+ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
+ | DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Enable dma end irqs on SSP to detect end of transfer */
+ if (drv_data->ssp_type == PXA25x_SSP)
+ DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ DCSR(drv_data->rx_channel) |= DCSR_RUN;
+ DCSR(drv_data->tx_channel) |= DCSR_RUN;
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct device *dev = &drv_data->pdev->dev;
+ struct ssp_device *ssp = drv_data->ssp;
+
+ /* Get two DMA channels (rx and tx) */
+ drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
+ DMA_PRIO_HIGH,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->rx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting rx channel\n",
+ drv_data->rx_channel);
+ return -ENODEV;
+ }
+ drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
+ DMA_PRIO_MEDIUM,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->tx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting tx channel\n",
+ drv_data->tx_channel);
+ pxa_free_dma(drv_data->rx_channel);
+ return -ENODEV;
+ }
+
+ DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
+ DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ struct ssp_device *ssp = drv_data->ssp;
+
+ DRCMR(ssp->drcmr_rx) = 0;
+ DRCMR(ssp->drcmr_tx) = 0;
+
+ if (drv_data->tx_channel != 0)
+ pxa_free_dma(drv_data->tx_channel);
+ if (drv_data->rx_channel != 0)
+ pxa_free_dma(drv_data->rx_channel);
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+ if (drv_data->rx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_rx) =
+ DRCMR_MAPVLD | drv_data->rx_channel;
+ if (drv_data->tx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_tx) =
+ DRCMR_MAPVLD | drv_data->tx_channel;
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info =
+ (struct pxa2xx_spi_chip *)spi->controller_data;
+ int bytes_per_word;
+ int burst_bytes;
+ int thresh_words;
+ int req_burst_size;
+ int retval = 0;
+
+ /* Set the threshold (in registers) to the same amount of data as is
+ * represented by the burst size (in bytes). The computation below is
+ * (burst_size rounded up to the nearest 8 bytes, word or long word)
+ * divided by (bytes/register). The tx threshold is the inverse of the
+ * rx, so that there is always enough data in the rx fifo to satisfy a
+ * burst, and always enough space in the tx fifo to accept a burst (a
+ * tx burst will overwrite the fifo if there is not enough space);
+ * there must always remain enough empty space in the rx fifo for any
+ * data loaded into the tx fifo.
+ * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
+ * will be 8, or half the fifo.
+ * The threshold can only be set to 2, 4 or 8, but not 16, because to
+ * burst 16 into the tx fifo the fifo would have to be empty; however,
+ * the minimum fifo trigger level is 1, and the tx will request
+ * service when the fifo is at this level, with only 15 spaces free.
+ */
+
+ /* find bytes/word */
+ if (bits_per_word <= 8)
+ bytes_per_word = 1;
+ else if (bits_per_word <= 16)
+ bytes_per_word = 2;
+ else
+ bytes_per_word = 4;
+
+ /* use struct pxa2xx_spi_chip->dma_burst_size if available */
+ if (chip_info)
+ req_burst_size = chip_info->dma_burst_size;
+ else {
+ switch (chip->dma_burst_size) {
+ default:
+ /* if the default burst size is not set,
+ * do it now */
+ chip->dma_burst_size = DCMD_BURST8;
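+ /* fall through */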
+ case DCMD_BURST8:
+ req_burst_size = 8;
+ break;
+ case DCMD_BURST16:
+ req_burst_size = 16;
+ break;
+ case DCMD_BURST32:
+ req_burst_size = 32;
+ break;
+ }
+ }
+ if (req_burst_size <= 8) {
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ } else if (req_burst_size <= 16) {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ }
+ } else {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else if (bytes_per_word == 2) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST32;
+ burst_bytes = 32;
+ }
+ }
+
+ thresh_words = burst_bytes / bytes_per_word;
+
+ /* thresh_words will be between 2 and 8 */
+ *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
+ | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
+
+ return retval;
+}
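/*
 * Editor's note (not part of the patch): worked examples of the mapping
 * implemented above, assuming no chip_info so the default DCMD_BURST8 is
 * used unless stated otherwise:
 *
 *	bits_per_word =  8, burst  8 bytes -> thresh_words = 8:
 *		*threshold = SSCR1_RxTresh(8) | SSCR1_TxTresh(8)
 *	bits_per_word = 16, burst  8 bytes -> thresh_words = 4:
 *		*threshold = SSCR1_RxTresh(4) | SSCR1_TxTresh(12)
 *	bits_per_word = 32, burst  8 bytes -> thresh_words = 2:
 *		*threshold = SSCR1_RxTresh(2) | SSCR1_TxTresh(14)
 *	bits_per_word =  8, burst 16 requested -> reduced to 8 bytes and
 *		the function returns 1 so the caller can warn about it.
 */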
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33..fe792106bdc 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,20 +22,23 @@
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
-#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
+#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -45,12 +49,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
-#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
-#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
-#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
-#define MAX_DMA_LEN 8191
-#define DMA_ALIGNMENT 8
-
/*
* for testing SSCR1 changes that require SSP restart, basically
* everything except the service and interrupt enables, the pxa270 developer
@@ -65,115 +63,113 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
-#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void const __iomem *p) \
-{ return __raw_readl(p + (off)); } \
-\
-static inline void write_##reg(u32 v, void __iomem *p) \
-{ __raw_writel(v, p + (off)); }
-
-DEFINE_SSP_REG(SSCR0, 0x00)
-DEFINE_SSP_REG(SSCR1, 0x04)
-DEFINE_SSP_REG(SSSR, 0x08)
-DEFINE_SSP_REG(SSITR, 0x0c)
-DEFINE_SSP_REG(SSDR, 0x10)
-DEFINE_SSP_REG(SSTO, 0x28)
-DEFINE_SSP_REG(SSPSP, 0x2c)
-
-#define START_STATE ((void*)0)
-#define RUNNING_STATE ((void*)1)
-#define DONE_STATE ((void*)2)
-#define ERROR_STATE ((void*)-1)
-
-#define QUEUE_RUNNING 0
-#define QUEUE_STOPPED 1
-
-struct driver_data {
- /* Driver model hookup */
- struct platform_device *pdev;
-
- /* SSP Info */
- struct ssp_device *ssp;
+#define LPSS_RX_THRESH_DFLT 64
+#define LPSS_TX_LOTHRESH_DFLT 160
+#define LPSS_TX_HITHRESH_DFLT 224
- /* SPI framework hookup */
- enum pxa_ssp_type ssp_type;
- struct spi_master *master;
+/* Offset from drv_data->lpss_base */
+#define GENERAL_REG 0x08
+#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
+#define SSP_REG 0x0c
+#define SPI_CS_CONTROL 0x18
+#define SPI_CS_CONTROL_SW_MODE BIT(0)
+#define SPI_CS_CONTROL_CS_HIGH BIT(1)
- /* PXA hookup */
- struct pxa2xx_spi_master *master_info;
-
- /* DMA setup stuff */
- int rx_channel;
- int tx_channel;
- u32 *null_dma_buf;
-
- /* SSP register addresses */
- void __iomem *ioaddr;
- u32 ssdr_physical;
-
- /* SSP masks*/
- u32 dma_cr1;
- u32 int_cr1;
- u32 clear_sr;
- u32 mask_sr;
-
- /* Driver message queue */
- struct workqueue_struct *workqueue;
- struct work_struct pump_messages;
- spinlock_t lock;
- struct list_head queue;
- int busy;
- int run;
-
- /* Message Transfer pump */
- struct tasklet_struct pump_transfers;
-
- /* Current message transfer state info */
- struct spi_message* cur_msg;
- struct spi_transfer* cur_transfer;
- struct chip_data *cur_chip;
- size_t len;
- void *tx;
- void *tx_end;
- void *rx;
- void *rx_end;
- int dma_mapped;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
- size_t rx_map_len;
- size_t tx_map_len;
- u8 n_bytes;
- u32 dma_width;
- int (*write)(struct driver_data *drv_data);
- int (*read)(struct driver_data *drv_data);
- irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
-};
+static bool is_lpss_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == LPSS_SSP;
+}
-struct chip_data {
- u32 cr0;
- u32 cr1;
- u32 psp;
- u32 timeout;
- u8 n_bytes;
- u32 dma_width;
- u32 dma_burst_size;
- u32 threshold;
- u32 dma_threshold;
- u8 enable_dma;
- u8 bits_per_word;
- u32 speed_hz;
- union {
- int gpio_cs;
- unsigned int frm;
- };
- int gpio_cs_inverted;
- int (*write)(struct driver_data *drv_data);
- int (*read)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
-};
+/*
+ * Read and write LPSS SSP private registers. Callers must first check
+ * that is_lpss_ssp() returns true before calling these.
+ */
+static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
+{
+ WARN_ON(!drv_data->lpss_base);
+ return readl(drv_data->lpss_base + offset);
+}
+
+static void __lpss_ssp_write_priv(struct driver_data *drv_data,
+ unsigned offset, u32 value)
+{
+ WARN_ON(!drv_data->lpss_base);
+ writel(value, drv_data->lpss_base + offset);
+}
-static void pump_messages(struct work_struct *work);
+/*
+ * lpss_ssp_setup - perform LPSS SSP specific setup
+ * @drv_data: pointer to the driver private data
+ *
+ * Perform LPSS SSP specific setup. This function must be called before
+ * any LPSS SSP private registers are accessed.
+ */
+static void lpss_ssp_setup(struct driver_data *drv_data)
+{
+ unsigned offset = 0x400;
+ u32 value, orig;
+
+ if (!is_lpss_ssp(drv_data))
+ return;
+
+ /*
+ * Perform auto-detection of the LPSS SSP private registers. They
+ * can be either at 1k or 2k offset from the base address.
+ */
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
+ value = orig | SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
+ value = orig & ~SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+detection_done:
+ /* Now set the LPSS base */
+ drv_data->lpss_base = drv_data->ioaddr + offset;
+
+ /* Enable software chip select control */
+ value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+
+ /* Enable multiblock DMA transfers */
+ if (drv_data->master_info->enable_dma) {
+ __lpss_ssp_write_priv(drv_data, SSP_REG, 1);
+
+ value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
+ value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
+ }
+}
+
+static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
+{
+ u32 value;
+
+ if (!is_lpss_ssp(drv_data))
+ return;
+
+ value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
+ if (enable)
+ value &= ~SPI_CS_CONTROL_CS_HIGH;
+ else
+ value |= SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+}
static void cs_assert(struct driver_data *drv_data)
{
@@ -189,8 +185,12 @@ static void cs_assert(struct driver_data *drv_data)
return;
}
- if (gpio_is_valid(chip->gpio_cs))
+ if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
+ return;
+ }
+
+ lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
@@ -205,30 +205,15 @@ static void cs_deassert(struct driver_data *drv_data)
return;
}
- if (gpio_is_valid(chip->gpio_cs))
+ if (gpio_is_valid(chip->gpio_cs)) {
gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
-}
-
-static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
-{
- void __iomem *reg = drv_data->ioaddr;
-
- if (drv_data->ssp_type == CE4100_SSP)
- val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
-
- write_SSSR(val, reg);
-}
+ return;
+ }
-static int pxa25x_ssp_comp(struct driver_data *drv_data)
-{
- if (drv_data->ssp_type == PXA25x_SSP)
- return 1;
- if (drv_data->ssp_type == CE4100_SSP)
- return 1;
- return 0;
+ lpss_ssp_cs_control(drv_data, false);
}
-static int flush(struct driver_data *drv_data)
+int pxa2xx_spi_flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
@@ -354,7 +339,7 @@ static int u32_reader(struct driver_data *drv_data)
return drv_data->rx == drv_data->rx_end;
}
-static void *next_transfer(struct driver_data *drv_data)
+void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct spi_transfer *trans = drv_data->cur_transfer;
@@ -370,92 +355,17 @@ static void *next_transfer(struct driver_data *drv_data)
return DONE_STATE;
}
-static int map_dma_buffers(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- struct device *dev = &msg->spi->dev;
-
- if (!drv_data->cur_chip->enable_dma)
- return 0;
-
- if (msg->is_dma_mapped)
- return drv_data->rx_dma && drv_data->tx_dma;
-
- if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
- return 0;
-
- /* Modify setup if rx buffer is null */
- if (drv_data->rx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->rx = drv_data->null_dma_buf;
- drv_data->rx_map_len = 4;
- } else
- drv_data->rx_map_len = drv_data->len;
-
-
- /* Modify setup if tx buffer is null */
- if (drv_data->tx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->tx = drv_data->null_dma_buf;
- drv_data->tx_map_len = 4;
- } else
- drv_data->tx_map_len = drv_data->len;
-
- /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
- * so we flush the cache *before* invalidating it, in case
- * the tx and rx buffers overlap.
- */
- drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, drv_data->tx_dma))
- return 0;
-
- /* Stream map the rx buffer */
- drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, drv_data->rx_dma)) {
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- return 0;
- }
-
- return 1;
-}
-
-static void unmap_dma_buffers(struct driver_data *drv_data)
-{
- struct device *dev;
-
- if (!drv_data->dma_mapped)
- return;
-
- if (!drv_data->cur_msg->is_dma_mapped) {
- dev = &drv_data->cur_msg->spi->dev;
- dma_unmap_single(dev, drv_data->rx_dma,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- }
-
- drv_data->dma_mapped = 0;
-}
-
/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
struct spi_transfer* last_transfer;
- unsigned long flags;
struct spi_message *msg;
- spin_lock_irqsave(&drv_data->lock, flags);
msg = drv_data->cur_msg;
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
- spin_unlock_irqrestore(&drv_data->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
/* Delay if requested before any change in chip select */
@@ -481,13 +391,7 @@ static void giveback(struct driver_data *drv_data)
*/
/* get a pointer to the next message, if any */
- spin_lock_irqsave(&drv_data->lock, flags);
- if (list_empty(&drv_data->queue))
- next_msg = NULL;
- else
- next_msg = list_entry(drv_data->queue.next,
- struct spi_message, queue);
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ next_msg = spi_get_next_queued_message(drv_data->master);
/* see if the next and current messages point
* to the same chip
@@ -498,168 +402,10 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- msg->state = NULL;
- if (msg->complete)
- msg->complete(msg->context);
-
+ spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
}
-static int wait_ssp_rx_stall(void const __iomem *ioaddr)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static int wait_dma_channel_stop(int channel)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static void dma_error_stop(struct driver_data *drv_data, const char *msg)
-{
- void __iomem *reg = drv_data->ioaddr;
-
- /* Stop and reset */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
- if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
- flush(drv_data);
- write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-
- unmap_dma_buffers(drv_data);
-
- dev_err(&drv_data->pdev->dev, "%s\n", msg);
-
- drv_data->cur_msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void dma_transfer_complete(struct driver_data *drv_data)
-{
- void __iomem *reg = drv_data->ioaddr;
- struct spi_message *msg = drv_data->cur_msg;
-
- /* Clear and disable interrupts on SSP and DMA channels*/
- write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-
- if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: dma rx channel stop failed\n");
-
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer: ssp rx stall failed\n");
-
- unmap_dma_buffers(drv_data);
-
- /* update the buffer pointer for the amount completed in dma */
- drv_data->rx += drv_data->len -
- (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
-
- /* read trailing data from fifo, it does not matter how many
- * bytes are in the fifo just read until buffer is full
- * or fifo is empty, which ever occurs first */
- drv_data->read(drv_data);
-
- /* return count of what was actually read */
- msg->actual_length += drv_data->len -
- (drv_data->rx_end - drv_data->rx);
-
- /* Transfer delays and chip select release are
- * handled in pump_transfers or giveback
- */
-
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void dma_handler(int channel, void *data)
-{
- struct driver_data *drv_data = data;
- u32 irq_status = DCSR(channel) & DMA_INT_MASK;
-
- if (irq_status & DCSR_BUSERR) {
-
- if (channel == drv_data->tx_channel)
- dma_error_stop(drv_data,
- "dma_handler: "
- "bad bus address on tx channel");
- else
- dma_error_stop(drv_data,
- "dma_handler: "
- "bad bus address on rx channel");
- return;
- }
-
- /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
- if ((channel == drv_data->tx_channel)
- && (irq_status & DCSR_ENDINTR)
- && (drv_data->ssp_type == PXA25x_SSP)) {
-
- /* Wait for rx to stall */
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: ssp rx stall failed\n");
-
- /* finish this transfer, start the next */
- dma_transfer_complete(drv_data);
- }
-}
-
-static irqreturn_t dma_transfer(struct driver_data *drv_data)
-{
- u32 irq_status;
- void __iomem *reg = drv_data->ioaddr;
-
- irq_status = read_SSSR(reg) & drv_data->mask_sr;
- if (irq_status & SSSR_ROR) {
- dma_error_stop(drv_data, "dma_transfer: fifo overrun");
- return IRQ_HANDLED;
- }
-
- /* Check for false positive timeout */
- if ((irq_status & SSSR_TINT)
- && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
- write_SSSR(SSSR_TINT, reg);
- return IRQ_HANDLED;
- }
-
- if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
-
- /* Clear and disable timeout interrupt, do the rest in
- * dma_transfer_complete */
- if (!pxa25x_ssp_comp(drv_data))
- write_SSTO(0, reg);
-
- /* finish this transfer, start the next */
- dma_transfer_complete(drv_data);
-
- return IRQ_HANDLED;
- }
-
- /* Opps problem detected */
- return IRQ_NONE;
-}
-
static void reset_sccr1(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
@@ -681,7 +427,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, reg);
- flush(drv_data);
+ pxa2xx_spi_flush(drv_data);
write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -709,7 +455,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
*/
/* Move to next transfer */
- drv_data->cur_msg->state = next_transfer(drv_data);
+ drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
@@ -789,11 +535,30 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
void __iomem *reg = drv_data->ioaddr;
- u32 sccr1_reg = read_SSCR1(reg);
+ u32 sccr1_reg;
u32 mask = drv_data->mask_sr;
u32 status;
+ /*
+ * The IRQ might be shared with other peripherals, so we must first
+ * check whether we are runtime (RPM) suspended. If we are, assume the
+ * IRQ was not for us (we should not be RPM suspended while the
+ * interrupt is enabled).
+ */
+ if (pm_runtime_suspended(&drv_data->pdev->dev))
+ return IRQ_NONE;
+
+ /*
+ * If the device is not yet RPM suspended and we get an interrupt
+ * that is meant for another device, check whether the status bits
+ * are all set to one; that means the device is already powered off.
+ */
status = read_SSSR(reg);
+ if (status == ~0)
+ return IRQ_NONE;
+
+ sccr1_reg = read_SSCR1(reg);
/* Ignore possible writes if we don't need to write */
if (!(sccr1_reg & SSCR1_TIE))
@@ -810,8 +575,8 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
write_SSTO(0, reg);
write_SSSR_CS(drv_data, drv_data->clear_sr);
- dev_err(&drv_data->pdev->dev, "bad message state "
- "in interrupt handler\n");
+ dev_err(&drv_data->pdev->dev,
+ "bad message state in interrupt handler\n");
/* Never fail */
return IRQ_HANDLED;
@@ -820,106 +585,12 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
return drv_data->transfer_handler(drv_data);
}
-static int set_dma_burst_and_threshold(struct chip_data *chip,
- struct spi_device *spi,
- u8 bits_per_word, u32 *burst_code,
- u32 *threshold)
+static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
- struct pxa2xx_spi_chip *chip_info =
- (struct pxa2xx_spi_chip *)spi->controller_data;
- int bytes_per_word;
- int burst_bytes;
- int thresh_words;
- int req_burst_size;
- int retval = 0;
-
- /* Set the threshold (in registers) to equal the same amount of data
- * as represented by burst size (in bytes). The computation below
- * is (burst_size rounded up to nearest 8 byte, word or long word)
- * divided by (bytes/register); the tx threshold is the inverse of
- * the rx, so that there will always be enough data in the rx fifo
- * to satisfy a burst, and there will always be enough space in the
- * tx fifo to accept a burst (a tx burst will overwrite the fifo if
- * there is not enough space), there must always remain enough empty
- * space in the rx fifo for any data loaded to the tx fifo.
- * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
- * will be 8, or half the fifo;
- * The threshold can only be set to 2, 4 or 8, but not 16, because
- * to burst 16 to the tx fifo, the fifo would have to be empty;
- * however, the minimum fifo trigger level is 1, and the tx will
- * request service when the fifo is at this level, with only 15 spaces.
- */
+ unsigned long ssp_clk = drv_data->max_clk_rate;
+ const struct ssp_device *ssp = drv_data->ssp;
- /* find bytes/word */
- if (bits_per_word <= 8)
- bytes_per_word = 1;
- else if (bits_per_word <= 16)
- bytes_per_word = 2;
- else
- bytes_per_word = 4;
-
- /* use struct pxa2xx_spi_chip->dma_burst_size if available */
- if (chip_info)
- req_burst_size = chip_info->dma_burst_size;
- else {
- switch (chip->dma_burst_size) {
- default:
- /* if the default burst size is not set,
- * do it now */
- chip->dma_burst_size = DCMD_BURST8;
- case DCMD_BURST8:
- req_burst_size = 8;
- break;
- case DCMD_BURST16:
- req_burst_size = 16;
- break;
- case DCMD_BURST32:
- req_burst_size = 32;
- break;
- }
- }
- if (req_burst_size <= 8) {
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- } else if (req_burst_size <= 16) {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- }
- } else {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else if (bytes_per_word == 2) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST32;
- burst_bytes = 32;
- }
- }
-
- thresh_words = burst_bytes / bytes_per_word;
-
- /* thresh_words will be between 2 and 8 */
- *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
- | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
-
- return retval;
-}
-
-static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
-{
- unsigned long ssp_clk = clk_get_rate(ssp->clk);
+ rate = min_t(int, ssp_clk, rate);
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
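/*
 * Editor's note (not part of the patch): the PXA25x/CE4100 branch above
 * encodes the divisor into SSCR0.SCR (bits 15:8), giving an effective bit
 * rate of ssp_clk / (2 * (SCR + 1)). With a hypothetical ssp_clk of
 * 100 MHz and a requested rate of 10 MHz, SCR = 100/(2*10) - 1 = 4, and
 * the actual rate is 100 MHz / (2 * 5) = 10 MHz. The dev_dbg() output in
 * setup() below prints exactly this "actual" rate.
 */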
@@ -934,7 +605,6 @@ static void pump_transfers(unsigned long data)
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
- struct ssp_device *ssp = drv_data->ssp;
void __iomem *reg = drv_data->ioaddr;
u32 clk_div = 0;
u8 bits = 0;
@@ -976,15 +646,15 @@ static void pump_transfers(unsigned long data)
cs_deassert(drv_data);
}
- /* Check for transfers that need multiple DMA segments */
- if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
+ /* Check if we can DMA this transfer */
+ if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&drv_data->pdev->dev,
- "pump_transfers: mapped transfer length "
- "of %u is greater than %d\n",
+ "pump_transfers: mapped transfer length of "
+ "%u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
message->status = -EINVAL;
giveback(drv_data);
@@ -992,29 +662,27 @@ static void pump_transfers(unsigned long data)
}
/* warn ... we force this to PIO mode */
- if (printk_ratelimit())
- dev_warn(&message->spi->dev, "pump_transfers: "
- "DMA disabled for transfer length %ld "
- "greater than %d\n",
- (long)drv_data->len, MAX_DMA_LEN);
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA disabled for transfer length %ld "
+ "greater than %d\n",
+ (long)drv_data->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
- if (flush(drv_data) == 0) {
+ if (pxa2xx_spi_flush(drv_data) == 0) {
dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
message->status = -EIO;
giveback(drv_data);
return;
}
drv_data->n_bytes = chip->n_bytes;
- drv_data->dma_width = chip->dma_width;
drv_data->tx = (void *)transfer->tx_buf;
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
drv_data->rx_dma = transfer->rx_dma;
drv_data->tx_dma = transfer->tx_dma;
- drv_data->len = transfer->len & DCMD_LENGTH;
+ drv_data->len = transfer->len;
drv_data->write = drv_data->tx ? chip->write : null_writer;
drv_data->read = drv_data->rx ? chip->read : null_reader;
@@ -1031,25 +699,22 @@ static void pump_transfers(unsigned long data)
if (transfer->bits_per_word)
bits = transfer->bits_per_word;
- clk_div = ssp_get_clk_div(ssp, speed);
+ clk_div = ssp_get_clk_div(drv_data, speed);
if (bits <= 8) {
drv_data->n_bytes = 1;
- drv_data->dma_width = DCMD_WIDTH1;
drv_data->read = drv_data->read != null_reader ?
u8_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u8_writer : null_writer;
} else if (bits <= 16) {
drv_data->n_bytes = 2;
- drv_data->dma_width = DCMD_WIDTH2;
drv_data->read = drv_data->read != null_reader ?
u16_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
u16_writer : null_writer;
} else if (bits <= 32) {
drv_data->n_bytes = 4;
- drv_data->dma_width = DCMD_WIDTH4;
drv_data->read = drv_data->read != null_reader ?
u32_reader : null_reader;
drv_data->write = drv_data->write != null_writer ?
@@ -1058,14 +723,12 @@ static void pump_transfers(unsigned long data)
/* if bits/word is changed in dma mode, then must check the
* thresholds and burst also */
if (chip->enable_dma) {
- if (set_dma_burst_and_threshold(chip, message->spi,
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+ message->spi,
bits, &dma_burst,
&dma_thresh))
- if (printk_ratelimit())
- dev_warn(&message->spi->dev,
- "pump_transfers: "
- "DMA burst size reduced to "
- "match bits_per_word\n");
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
}
cr0 = clk_div
@@ -1077,70 +740,21 @@ static void pump_transfers(unsigned long data)
message->state = RUNNING_STATE;
- /* Try to map dma buffer and do a dma transfer if successful, but
- * only if the length is non-zero and less than MAX_DMA_LEN.
- *
- * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
- * of PIO instead. Care is needed above because the transfer may
- * have have been passed with buffers that are already dma mapped.
- * A zero-length transfer in PIO mode will not try to write/read
- * to/from the buffers
- *
- * REVISIT large transfers are exactly where we most want to be
- * using DMA. If this happens much, split those transfers into
- * multiple DMA segments rather than forcing PIO.
- */
drv_data->dma_mapped = 0;
- if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
- drv_data->dma_mapped = map_dma_buffers(drv_data);
+ if (pxa2xx_spi_dma_is_possible(drv_data->len))
+ drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
if (drv_data->dma_mapped) {
/* Ensure we have the correct interrupt handler */
- drv_data->transfer_handler = dma_transfer;
-
- /* Setup rx DMA Channel */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
- DTADR(drv_data->rx_channel) = drv_data->rx_dma;
- if (drv_data->rx == drv_data->null_dma_buf)
- /* No target address increment */
- DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
- | DCMD_FLOWSRC
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
-
- /* Setup tx DMA Channel */
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->tx_channel) = drv_data->tx_dma;
- DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
- if (drv_data->tx == drv_data->null_dma_buf)
- /* No source address increment */
- DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
- | DCMD_FLOWTRG
- | drv_data->dma_width
- | dma_burst
- | drv_data->len;
-
- /* Enable dma end irqs on SSP to detect end of transfer */
- if (drv_data->ssp_type == PXA25x_SSP)
- DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+ drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
+
+ pxa2xx_spi_dma_prepare(drv_data, dma_burst);
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
write_SSSR(drv_data->clear_sr, reg);
- DCSR(drv_data->rx_channel) |= DCSR_RUN;
- DCSR(drv_data->tx_channel) |= DCSR_RUN;
+
+ pxa2xx_spi_dma_start(drv_data);
} else {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = interrupt_transfer;
@@ -1150,6 +764,13 @@ static void pump_transfers(unsigned long data)
write_SSSR_CS(drv_data, drv_data->clear_sr);
}
+ if (is_lpss_ssp(drv_data)) {
+ if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
+ write_SSIRF(chip->lpss_rx_threshold, reg);
+ if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
+ write_SSITF(chip->lpss_tx_threshold, reg);
+ }
+
/* see if we need to reload the config registers */
if ((read_SSCR0(reg) != cr0)
|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
@@ -1176,31 +797,12 @@ static void pump_transfers(unsigned long data)
write_SSCR1(cr1, reg);
}
-static void pump_messages(struct work_struct *work)
+static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct driver_data *drv_data =
- container_of(work, struct driver_data, pump_messages);
- unsigned long flags;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&drv_data->lock, flags);
- if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
- drv_data->busy = 0;
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (drv_data->cur_msg) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Extract head of queue */
- drv_data->cur_msg = list_entry(drv_data->queue.next,
- struct spi_message, queue);
- list_del_init(&drv_data->cur_msg->queue);
+ struct driver_data *drv_data = spi_master_get_devdata(master);
+ drv_data->cur_msg = msg;
/* Initial message state*/
drv_data->cur_msg->state = START_STATE;
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -1213,33 +815,16 @@ static void pump_messages(struct work_struct *work)
/* Mark as busy and launch transfers */
tasklet_schedule(&drv_data->pump_transfers);
-
- drv_data->busy = 1;
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ return 0;
}
-static int transfer(struct spi_device *spi, struct spi_message *msg)
+static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
- struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- if (drv_data->run == QUEUE_STOPPED) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -ESHUTDOWN;
- }
-
- msg->actual_length = 0;
- msg->status = -EINPROGRESS;
- msg->state = START_STATE;
+ struct driver_data *drv_data = spi_master_get_devdata(master);
- list_add_tail(&msg->queue, &drv_data->queue);
-
- if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
-
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ /* Disable the SSP now */
+ write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
+ drv_data->ioaddr);
return 0;
}
@@ -1267,8 +852,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
if (gpio_is_valid(chip_info->gpio_cs)) {
err = gpio_request(chip_info->gpio_cs, "SPI_CS");
if (err) {
- dev_err(&spi->dev, "failed to request chip select "
- "GPIO%d\n", chip_info->gpio_cs);
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
+ chip_info->gpio_cs);
return err;
}
@@ -1287,40 +872,30 @@ static int setup(struct spi_device *spi)
struct pxa2xx_spi_chip *chip_info = NULL;
struct chip_data *chip;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- struct ssp_device *ssp = drv_data->ssp;
unsigned int clk_div;
- uint tx_thres = TX_THRESH_DFLT;
- uint rx_thres = RX_THRESH_DFLT;
-
- if (!pxa25x_ssp_comp(drv_data)
- && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
- dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
- "b/w not 4-32 for type non-PXA25x_SSP\n",
- drv_data->ssp_type, spi->bits_per_word);
- return -EINVAL;
- } else if (pxa25x_ssp_comp(drv_data)
- && (spi->bits_per_word < 4
- || spi->bits_per_word > 16)) {
- dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
- "b/w not 4-16 for type PXA25x_SSP\n",
- drv_data->ssp_type, spi->bits_per_word);
- return -EINVAL;
+ uint tx_thres, tx_hi_thres, rx_thres;
+
+ if (is_lpss_ssp(drv_data)) {
+ tx_thres = LPSS_TX_LOTHRESH_DFLT;
+ tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
+ rx_thres = LPSS_RX_THRESH_DFLT;
+ } else {
+ tx_thres = TX_THRESH_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_DFLT;
}
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
- if (!chip) {
- dev_err(&spi->dev,
- "failed setup: can't allocate chip data\n");
+ if (!chip)
return -ENOMEM;
- }
if (drv_data->ssp_type == CE4100_SSP) {
if (spi->chip_select > 4) {
- dev_err(&spi->dev, "failed setup: "
- "cs number must not be > 4.\n");
+ dev_err(&spi->dev,
+ "failed setup: cs number must not be > 4.\n");
kfree(chip);
return -EINVAL;
}
@@ -1330,8 +905,6 @@ static int setup(struct spi_device *spi)
chip->gpio_cs = -1;
chip->enable_dma = 0;
chip->timeout = TIMOUT_DFLT;
- chip->dma_burst_size = drv_data->master_info->enable_dma ?
- DCMD_BURST8 : 0;
}
/* protocol drivers may change the chip settings, so...
@@ -1345,31 +918,45 @@ static int setup(struct spi_device *spi)
chip->timeout = chip_info->timeout;
if (chip_info->tx_threshold)
tx_thres = chip_info->tx_threshold;
+ if (chip_info->tx_hi_threshold)
+ tx_hi_thres = chip_info->tx_hi_threshold;
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
chip->enable_dma = drv_data->master_info->enable_dma;
chip->dma_threshold = 0;
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
+ } else if (ACPI_HANDLE(&spi->dev)) {
+ /*
+ * Slave devices enumerated from ACPI namespace don't
+ * usually have chip_info but we still might want to use
+ * DMA with them.
+ */
+ chip->enable_dma = drv_data->master_info->enable_dma;
}
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
+ | SSITF_TxHiThresh(tx_hi_thres);
+
/* set dma burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the
* burst and threshold can still respond to changes in bits_per_word */
if (chip->enable_dma) {
/* set up legal burst and threshold for dma */
- if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
+ spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
- dev_warn(&spi->dev, "in setup: DMA burst size reduced "
- "to match bits_per_word\n");
+ dev_warn(&spi->dev,
+ "in setup: DMA burst size reduced to match bits_per_word\n");
}
}
- clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
+ clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
chip->speed_hz = spi->max_speed_hz;
chip->cr0 = clk_div
@@ -1382,37 +969,34 @@ static int setup(struct spi_device *spi)
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
+ if (spi->mode & SPI_LOOP)
+ chip->cr1 |= SSCR1_LBM;
+
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk)
+ drv_data->max_clk_rate
/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
else
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk) / 2
+ drv_data->max_clk_rate / 2
/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
chip->enable_dma ? "DMA" : "PIO");
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
- chip->dma_width = DCMD_WIDTH1;
chip->read = u8_reader;
chip->write = u8_writer;
} else if (spi->bits_per_word <= 16) {
chip->n_bytes = 2;
- chip->dma_width = DCMD_WIDTH2;
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
- chip->dma_width = DCMD_WIDTH4;
chip->read = u32_reader;
chip->write = u32_writer;
- } else {
- dev_err(&spi->dev, "invalid wordsize\n");
- return -ENODEV;
}
chip->bits_per_word = spi->bits_per_word;
@@ -1438,95 +1022,70 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __devinit init_queue(struct driver_data *drv_data)
+#ifdef CONFIG_ACPI
+static struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
- INIT_LIST_HEAD(&drv_data->queue);
- spin_lock_init(&drv_data->lock);
-
- drv_data->run = QUEUE_STOPPED;
- drv_data->busy = 0;
-
- tasklet_init(&drv_data->pump_transfers,
- pump_transfers, (unsigned long)drv_data);
-
- INIT_WORK(&drv_data->pump_messages, pump_messages);
- drv_data->workqueue = create_singlethread_workqueue(
- dev_name(drv_data->master->dev.parent));
- if (drv_data->workqueue == NULL)
- return -EBUSY;
-
- return 0;
-}
+ struct pxa2xx_spi_master *pdata;
+ struct acpi_device *adev;
+ struct ssp_device *ssp;
+ struct resource *res;
+ int devid;
-static int start_queue(struct driver_data *drv_data)
-{
- unsigned long flags;
+ if (!ACPI_HANDLE(&pdev->dev) ||
+ acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ return NULL;
- spin_lock_irqsave(&drv_data->lock, flags);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
- if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -EBUSY;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
- drv_data->run = QUEUE_RUNNING;
- drv_data->cur_msg = NULL;
- drv_data->cur_transfer = NULL;
- drv_data->cur_chip = NULL;
- spin_unlock_irqrestore(&drv_data->lock, flags);
-
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
-
- return 0;
-}
+ ssp = &pdata->ssp;
-static int stop_queue(struct driver_data *drv_data)
-{
- unsigned long flags;
- unsigned limit = 500;
- int status = 0;
+ ssp->phys_base = res->start;
+ ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ssp->mmio_base))
+ return NULL;
- spin_lock_irqsave(&drv_data->lock, flags);
-
- /* This is a bit lame, but is optimized for the common execution path.
- * A wait_queue on the drv_data->busy could be used, but then the common
- * execution path (pump_messages) would be required to call wake_up or
- * friends on every SPI message. Do this instead */
- drv_data->run = QUEUE_STOPPED;
- while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- msleep(10);
- spin_lock_irqsave(&drv_data->lock, flags);
- }
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ ssp->irq = platform_get_irq(pdev, 0);
+ ssp->type = LPSS_SSP;
+ ssp->pdev = pdev;
- if (!list_empty(&drv_data->queue) || drv_data->busy)
- status = -EBUSY;
+ ssp->port_id = -1;
+ if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
+ ssp->port_id = devid;
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ pdata->num_chipselect = 1;
+ pdata->enable_dma = true;
+ pdata->tx_chan_id = -1;
+ pdata->rx_chan_id = -1;
- return status;
+ return pdata;
}
-static int destroy_queue(struct driver_data *drv_data)
+static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
+ { "INT3430", 0 },
+ { "INT3431", 0 },
+ { "80860F0E", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#else
+static inline struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
- int status;
-
- status = stop_queue(drv_data);
- /* we are unloading the module or failing to load (only two calls
- * to this routine), and neither call can handle a return value.
- * However, destroy_workqueue calls flush_workqueue, and that will
- * block until all work is done. If the reason that stop_queue
- * timed out is that the work will never finish, then it does no
- * good to call destroy_workqueue, so return anyway. */
- if (status != 0)
- return status;
-
- destroy_workqueue(drv_data->workqueue);
-
- return 0;
+ return NULL;
}
+#endif
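When ACPI does not supply the configuration, the probe path below falls back to dev_get_platdata(). A minimal sketch of board code providing that data (field names as used in this patch; the variable name and registration call are illustrative only):

	static struct pxa2xx_spi_master board_spi_pdata = {
		.num_chipselect	= 1,
		.enable_dma	= false,	/* force PIO */
	};
	/* attached to the "pxa2xx-spi" platform device, e.g. via
	 * platform_device_add_data(), before the device is registered */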
-static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
+static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa2xx_spi_master *platform_info;
@@ -1535,11 +1094,21 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
struct ssp_device *ssp;
int status;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
+ if (!platform_info) {
+ platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
+ if (!platform_info) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
ssp = pxa_ssp_request(pdev->id, pdev->name);
- if (ssp == NULL) {
- dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
+ if (!ssp)
+ ssp = &platform_info->ssp;
+
+ if (!ssp->mmio_base) {
+ dev_err(&pdev->dev, "failed to get ssp\n");
return -ENODEV;
}
@@ -1559,29 +1128,32 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
- master->bus_num = pdev->id;
+ master->bus_num = ssp->port_id;
master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
- master->transfer = transfer;
+ master->transfer_one_message = pxa2xx_spi_transfer_one_message;
+ master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ master->auto_runtime_pm = true;
drv_data->ssp_type = ssp->type;
- drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
- sizeof(struct driver_data)), 8);
+ drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
if (pxa25x_ssp_comp(drv_data)) {
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
drv_data->dma_cr1 = 0;
drv_data->clear_sr = SSSR_ROR;
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
- drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
+ drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
}
@@ -1597,35 +1169,17 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->tx_channel = -1;
drv_data->rx_channel = -1;
if (platform_info->enable_dma) {
-
- /* Get two DMA channels (rx and tx) */
- drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
- DMA_PRIO_HIGH,
- dma_handler,
- drv_data);
- if (drv_data->rx_channel < 0) {
- dev_err(dev, "problem (%d) requesting rx channel\n",
- drv_data->rx_channel);
- status = -ENODEV;
- goto out_error_irq_alloc;
- }
- drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
- DMA_PRIO_MEDIUM,
- dma_handler,
- drv_data);
- if (drv_data->tx_channel < 0) {
- dev_err(dev, "problem (%d) requesting tx channel\n",
- drv_data->tx_channel);
- status = -ENODEV;
- goto out_error_dma_alloc;
+ status = pxa2xx_spi_dma_setup(drv_data);
+ if (status) {
+ dev_dbg(dev, "no DMA channels available, using PIO\n");
+ platform_info->enable_dma = false;
}
-
- DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
- DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
}
/* Enable SOC clock */
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
+
+ drv_data->max_clk_rate = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
write_SSCR0(0, drv_data->ioaddr);
@@ -1640,41 +1194,29 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
write_SSTO(0, drv_data->ioaddr);
write_SSPSP(0, drv_data->ioaddr);
- /* Initial and start queue */
- status = init_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "problem initializing queue\n");
- goto out_error_clock_enabled;
- }
- status = start_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue\n");
- goto out_error_clock_enabled;
- }
+ lpss_ssp_setup(drv_data);
+
+ tasklet_init(&drv_data->pump_transfers, pump_transfers,
+ (unsigned long)drv_data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
- goto out_error_queue_alloc;
+ goto out_error_clock_enabled;
}
return status;
-out_error_queue_alloc:
- destroy_queue(drv_data);
-
out_error_clock_enabled:
- clk_disable(ssp->clk);
-
-out_error_dma_alloc:
- if (drv_data->tx_channel != -1)
- pxa_free_dma(drv_data->tx_channel);
- if (drv_data->rx_channel != -1)
- pxa_free_dma(drv_data->rx_channel);
-
-out_error_irq_alloc:
+ clk_disable_unprepare(ssp->clk);
+ pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
out_error_master_alloc:
@@ -1687,37 +1229,23 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct ssp_device *ssp;
- int status = 0;
if (!drv_data)
return 0;
ssp = drv_data->ssp;
- /* Remove the queue */
- status = destroy_queue(drv_data);
- if (status != 0)
- /* the kernel does not check the return status of this
- * this routine (mod->exit, within the kernel). Therefore
- * nothing is gained by returning from here, the module is
- * going away regardless, and we should not leave any more
- * resources allocated than necessary. We cannot free the
- * message memory in drv_data->queue, but we can release the
- * resources below. I think the kernel should honor -EBUSY
- * returns but... */
- dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
- "complete, message memory not freed\n");
+ pm_runtime_get_sync(&pdev->dev);
/* Disable the SSP at the peripheral and SOC level */
write_SSCR0(0, drv_data->ioaddr);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
/* Release DMA */
- if (drv_data->master_info->enable_dma) {
- DRCMR(ssp->drcmr_rx) = 0;
- DRCMR(ssp->drcmr_tx) = 0;
- pxa_free_dma(drv_data->tx_channel);
- pxa_free_dma(drv_data->rx_channel);
- }
+ if (drv_data->master_info->enable_dma)
+ pxa2xx_spi_dma_release(drv_data);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
/* Release IRQ */
free_irq(ssp->irq, drv_data);
@@ -1725,12 +1253,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
/* Release SSP */
pxa_ssp_free(ssp);
- /* Disconnect from the SPI framework */
- spi_unregister_master(drv_data->master);
-
- /* Prevent double remove */
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
@@ -1742,18 +1264,18 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- status = stop_queue(drv_data);
+ status = spi_master_suspend(drv_data->master);
if (status != 0)
return status;
write_SSCR0(0, drv_data->ioaddr);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
return 0;
}
@@ -1764,18 +1286,16 @@ static int pxa2xx_spi_resume(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- if (drv_data->rx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_rx) =
- DRCMR_MAPVLD | drv_data->rx_channel;
- if (drv_data->tx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_tx) =
- DRCMR_MAPVLD | drv_data->tx_channel;
+ pxa2xx_spi_dma_resume(drv_data);
/* Enable the SSP clock */
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
+
+ /* Restore LPSS private register bits */
+ lpss_ssp_setup(drv_data);
/* Start the queue running */
- status = start_queue(drv_data);
+ status = spi_master_resume(drv_data->master);
if (status != 0) {
dev_err(dev, "problem starting queue (%d)\n", status);
return status;
@@ -1783,20 +1303,38 @@ static int pxa2xx_spi_resume(struct device *dev)
return 0;
}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pxa2xx_spi_runtime_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drv_data->ssp->clk);
+ return 0;
+}
+
+static int pxa2xx_spi_runtime_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(drv_data->ssp->clk);
+ return 0;
+}
+#endif
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
- .suspend = pxa2xx_spi_suspend,
- .resume = pxa2xx_spi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
+ SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
+ pxa2xx_spi_runtime_resume, NULL)
};
-#endif
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &pxa2xx_spi_pm_ops,
-#endif
+ .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
},
.probe = pxa2xx_spi_probe,
.remove = pxa2xx_spi_remove,
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 00000000000..5adc2a11c7b
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_PXA2XX_H
+#define SPI_PXA2XX_H
+
+#include <linux/atomic.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+struct driver_data {
+ /* Driver model hookup */
+ struct platform_device *pdev;
+
+ /* SSP Info */
+ struct ssp_device *ssp;
+
+ /* SPI framework hookup */
+ enum pxa_ssp_type ssp_type;
+ struct spi_master *master;
+
+ /* PXA hookup */
+ struct pxa2xx_spi_master *master_info;
+
+ /* PXA private DMA setup stuff */
+ int rx_channel;
+ int tx_channel;
+ u32 *null_dma_buf;
+
+ /* SSP register addresses */
+ void __iomem *ioaddr;
+ u32 ssdr_physical;
+
+ /* SSP masks */
+ u32 dma_cr1;
+ u32 int_cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+ /* Maximum clock rate */
+ unsigned long max_clk_rate;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* DMA engine support */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ int rx_nents;
+ int tx_nents;
+ void *dummy;
+ atomic_t dma_running;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+
+ void __iomem *lpss_base;
+};
+
+struct chip_data {
+ u32 cr0;
+ u32 cr1;
+ u32 psp;
+ u32 timeout;
+ u8 n_bytes;
+ u32 dma_burst_size;
+ u32 threshold;
+ u32 dma_threshold;
+ u16 lpss_rx_threshold;
+ u16 lpss_tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u32 speed_hz;
+ union {
+ int gpio_cs;
+ unsigned int frm;
+ };
+ int gpio_cs_inverted;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+};
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void const __iomem *p) \
+{ return __raw_readl(p + (off)); } \
+\
+static inline void write_##reg(u32 v, void __iomem *p) \
+{ __raw_writel(v, p + (off)); }
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+DEFINE_SSP_REG(SSITF, SSITF)
+DEFINE_SSP_REG(SSIRF, SSIRF)
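For reference, DEFINE_SSP_REG(SSCR0, 0x00) above expands to the accessor pair used throughout the driver (e.g. write_SSCR0(0, drv_data->ioaddr) in probe):

	static inline u32 read_SSCR0(void const __iomem *p)
	{ return __raw_readl(p + (0x00)); }

	static inline void write_SSCR0(u32 v, void __iomem *p)
	{ __raw_writel(v, p + (0x00)); }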
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
+#define DMA_ALIGNMENT 8
+
+static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
+{
+ if (drv_data->ssp_type == PXA25x_SSP)
+ return 1;
+ if (drv_data->ssp_type == CE4100_SSP)
+ return 1;
+ return 0;
+}
+
+static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+{
+ void __iomem *reg = drv_data->ioaddr;
+
+ if (drv_data->ssp_type == CE4100_SSP)
+ val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+
+ write_SSSR(val, reg);
+}
+
+extern int pxa2xx_spi_flush(struct driver_data *drv_data);
+extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
+
+/*
+ * Select the right DMA implementation.
+ */
+#if defined(CONFIG_SPI_PXA2XX_PXADMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN 8191
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
+#elif defined(CONFIG_SPI_PXA2XX_DMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN SZ_64K
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
+#else
+#undef SPI_PXA2XX_USE_DMA
+#define MAX_DMA_LEN 0
+#define DEFAULT_DMA_CR1 0
+#endif
+
+#ifdef SPI_PXA2XX_USE_DMA
+extern bool pxa2xx_spi_dma_is_possible(size_t len);
+extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
+extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
+extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
+extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold);
+#else
+static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
+static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ return 0;
+}
+#define pxa2xx_spi_dma_transfer NULL
+static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+ u32 dma_burst) {}
+static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ return 0;
+}
+static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
+static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 00000000000..c08da380cb2
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License rev 2 and
+ * only rev 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
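These four macros decode the read-only geometry fields of QUP_IO_M_MODES; spi_qup_probe() below converts them to bytes as blk_sz = field * 16 (or 4 when the field is 0) and fifo_sz = blk_sz * (2 << fifo_field). For example, an output block field of 1 with an output FIFO field of 2 gives out_blk_sz = 16 and out_fifo_sz = 16 * (2 << 2) = 128 bytes.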
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+/* high speed mode is when the bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int tx_bytes;
+ int rx_bytes;
+};
+
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+ dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
+ loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ u8 *rx_buf = xfer->rx_buf;
+ u32 word, state;
+ int idx, shift, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->rx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ if (!rx_buf) {
+ controller->rx_bytes += w_size;
+ continue;
+ }
+
+ for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (w_size - idx - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
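As a concrete example of the unpacking above: with w_size == 2, a FIFO word of 0x00001234 is stored into rx_buf as 0x12 followed by 0x34 (most significant used byte first), matching the layout described in the comment.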
+
+static void spi_qup_fifo_write(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ const u8 *tx_buf = xfer->tx_buf;
+ u32 word, state, data;
+ int idx, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->tx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (state & QUP_OP_OUT_FIFO_FULL)
+ break;
+
+ word = 0;
+ for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+
+ if (!tx_buf) {
+ controller->tx_bytes += w_size;
+ break;
+ }
+
+ data = tx_buf[controller->tx_bytes];
+ word |= data << (BITS_PER_BYTE * (3 - idx));
+ }
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ struct spi_transfer *xfer;
+ u32 opflags, qup_err, spi_err;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ xfer = controller->xfer;
+ controller->xfer = NULL;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+
+ if (!xfer) {
+ dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
+ qup_err, spi_err, opflags);
+ return IRQ_HANDLED;
+ }
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->error = error;
+ controller->xfer = xfer;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->rx_bytes == xfer->len || error)
+ complete(&controller->done);
+
+ return IRQ_HANDLED;
+}
+
+
+/* set clock freq ... bits per word */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, mode;
+ int ret, n_words, w_size;
+
+ if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
+ dev_err(controller->dev, "too big size for loopback %d > %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+ dev_err(controller->dev, "fail to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
+ w_size = 4;
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+
+ n_words = xfer->len / w_size;
+ controller->w_size = w_size;
+
+ if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
+ mode = QUP_IO_M_MODE_FIFO;
+ writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BLOCK;
+ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+ * HS_MODE improves signal stability for spi-clk high rates,
+ * but is invalid in loop back mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+ return 0;
+}
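For an 8-bit transfer the code above picks w_size = 1 and programs the QUP_CONFIG_N field with bits_per_word - 1 = 7; a 100-byte transfer then counts as n_words = 100 and stays in FIFO mode only if in_fifo_sz is at least 100 * sizeof(u32) = 400 bytes, otherwise BLOCK mode is selected.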
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret = -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set RUN state\n");
+ goto exit;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
+ dev_warn(controller->dev, "cannot set PAUSE state\n");
+ goto exit;
+ }
+
+ spi_qup_fifo_write(controller, xfer);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set EXECUTE state\n");
+ goto exit;
+ }
+
+ if (!wait_for_completion_timeout(&controller->done, timeout))
+ ret = -ETIMEDOUT;
+exit:
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = NULL;
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+ return ret;
+}
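The timeout above amounts to roughly 100 times the nominal transfer time. For example, at xfer->speed_hz = 1 MHz and xfer->len = 125 bytes: DIV_ROUND_UP(1000000, MSEC_PER_SEC) = 1000 bits per millisecond, DIV_ROUND_UP(125 * 8, 1000) = 1 ms, and the wait is capped at 100 * msecs_to_jiffies(1).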
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 data, max_freq, iomode;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+ /* This is an optional parameter */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ return ret;
+ }
+
+ data = readl_relaxed(base + QUP_HW_VERSION);
+
+ if (data < QUP_HW_VERSION_2_1_1) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "v.%08x is not supported\n", data);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+ /* use num-cs unless not present or out of range */
+ if (of_property_read_u16(dev->of_node, "num-cs",
+ &master->num_chipselect) ||
+ (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
+
+ dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ data, controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+error:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .owner = THIS_MODULE,
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 4894bde4bbf..10112745bb1 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1,7 +1,8 @@
/*
* SH RSPI driver
*
- * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
@@ -25,225 +26,294 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
-#define RSPI_SPCR 0x00
-#define RSPI_SSLP 0x01
-#define RSPI_SPPCR 0x02
-#define RSPI_SPSR 0x03
-#define RSPI_SPDR 0x04
-#define RSPI_SPSCR 0x08
-#define RSPI_SPSSR 0x09
-#define RSPI_SPBR 0x0a
-#define RSPI_SPDCR 0x0b
-#define RSPI_SPCKD 0x0c
-#define RSPI_SSLND 0x0d
-#define RSPI_SPND 0x0e
-#define RSPI_SPCR2 0x0f
-#define RSPI_SPCMD0 0x10
-#define RSPI_SPCMD1 0x12
-#define RSPI_SPCMD2 0x14
-#define RSPI_SPCMD3 0x16
-#define RSPI_SPCMD4 0x18
-#define RSPI_SPCMD5 0x1a
-#define RSPI_SPCMD6 0x1c
-#define RSPI_SPCMD7 0x1e
-
-/* SPCR */
-#define SPCR_SPRIE 0x80
-#define SPCR_SPE 0x40
-#define SPCR_SPTIE 0x20
-#define SPCR_SPEIE 0x10
-#define SPCR_MSTR 0x08
-#define SPCR_MODFEN 0x04
-#define SPCR_TXMD 0x02
-#define SPCR_SPMS 0x01
-
-/* SSLP */
-#define SSLP_SSL1P 0x02
-#define SSLP_SSL0P 0x01
-
-/* SPPCR */
-#define SPPCR_MOIFE 0x20
-#define SPPCR_MOIFV 0x10
+#define RSPI_SPCR 0x00 /* Control Register */
+#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
+#define RSPI_SPPCR 0x02 /* Pin Control Register */
+#define RSPI_SPSR 0x03 /* Status Register */
+#define RSPI_SPDR 0x04 /* Data Register */
+#define RSPI_SPSCR 0x08 /* Sequence Control Register */
+#define RSPI_SPSSR 0x09 /* Sequence Status Register */
+#define RSPI_SPBR 0x0a /* Bit Rate Register */
+#define RSPI_SPDCR 0x0b /* Data Control Register */
+#define RSPI_SPCKD 0x0c /* Clock Delay Register */
+#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
+#define RSPI_SPND 0x0e /* Next-Access Delay Register */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
+#define RSPI_SPCMD0 0x10 /* Command Register 0 */
+#define RSPI_SPCMD1 0x12 /* Command Register 1 */
+#define RSPI_SPCMD2 0x14 /* Command Register 2 */
+#define RSPI_SPCMD3 0x16 /* Command Register 3 */
+#define RSPI_SPCMD4 0x18 /* Command Register 4 */
+#define RSPI_SPCMD5 0x1a /* Command Register 5 */
+#define RSPI_SPCMD6 0x1c /* Command Register 6 */
+#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
+#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
+#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
+
+/* QSPI only */
+#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
+#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
+#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
+#define SPCR_SPE 0x40 /* Function Enable */
+#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
+#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
+#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
+#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car M2 only */
+#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
+#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
#define SPPCR_SPOM 0x04
-#define SPPCR_SPLP2 0x02
-#define SPPCR_SPLP 0x01
-
-/* SPSR */
-#define SPSR_SPRF 0x80
-#define SPSR_SPTEF 0x20
-#define SPSR_PERF 0x08
-#define SPSR_MODF 0x04
-#define SPSR_IDLNF 0x02
-#define SPSR_OVRF 0x01
-
-/* SPSCR */
-#define SPSCR_SPSLN_MASK 0x07
-
-/* SPSSR */
-#define SPSSR_SPECM_MASK 0x70
-#define SPSSR_SPCP_MASK 0x07
-
-/* SPDCR */
-#define SPDCR_SPLW 0x20
-#define SPDCR_SPRDTD 0x10
+#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV 0x04 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
+#define SPSR_TEND 0x40 /* Transmit End */
+#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
+#define SPSR_PERF 0x08 /* Parity Error Flag */
+#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
+#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
+#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD SPDCR_SPLW1
+#define SPDCR_SPLBYTE SPDCR_SPLW0
+#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
-#define SPDCR_SLSEL_MASK 0x0c
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
-/* SPCKD */
-#define SPCKD_SCKDL_MASK 0x07
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
-/* SSLND */
-#define SSLND_SLNDL_MASK 0x07
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
-/* SPND */
-#define SPND_SPNDL_MASK 0x07
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
-/* SPCR2 */
-#define SPCR2_PTE 0x08
-#define SPCR2_SPIE 0x04
-#define SPCR2_SPOE 0x02
-#define SPCR2_SPPE 0x01
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
+#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
+#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE 0x01 /* Parity Enable */
-/* SPCMDn */
-#define SPCMD_SCKDEN 0x8000
-#define SPCMD_SLNDEN 0x4000
-#define SPCMD_SPNDEN 0x2000
-#define SPCMD_LSBF 0x1000
-#define SPCMD_SPB_MASK 0x0f00
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
+#define SPCMD_LSBF 0x1000 /* LSB First */
+#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
+#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
-#define SPCMD_SSLKP 0x0080
-#define SPCMD_SSLA_MASK 0x0030
-#define SPCMD_BRDV_MASK 0x000c
-#define SPCMD_CPOL 0x0002
-#define SPCMD_CPHA 0x0001
+#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1 0x0040
+#define SPCMD_SPIMOD0 0x0020
+#define SPCMD_SPIMOD_SINGLE 0
+#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
+#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
+#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
+#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_master *master;
- struct list_head queue;
- struct work_struct ws;
wait_queue_head_t wait;
- spinlock_t lock;
struct clk *clk;
- unsigned char spsr;
+ u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
+ const struct spi_ops *ops;
- /* for dmaengine */
- struct sh_dmae_slave dma_tx;
- struct sh_dmae_slave dma_rx;
- struct dma_chan *chan_tx;
- struct dma_chan *chan_rx;
- int irq;
-
- unsigned dma_width_16bit:1;
unsigned dma_callbacked:1;
+ unsigned byte_access:1;
};
-static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
iowrite8(data, rspi->addr + offset);
}
-static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
iowrite16(data, rspi->addr + offset);
}
-static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
- return ioread8(rspi->addr + offset);
+ iowrite32(data, rspi->addr + offset);
}
-static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
- return ioread16(rspi->addr + offset);
+ return ioread8(rspi->addr + offset);
}
-static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
- int tmp;
- unsigned char spbr;
-
- tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
- spbr = clamp(tmp, 0, 255);
-
- return spbr;
+ return ioread16(rspi->addr + offset);
}
-static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
}
-static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
+static u16 rspi_read_data(const struct rspi_data *rspi)
{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
}
-static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
- u8 enable_bit)
+/* optional functions */
+struct spi_ops {
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ u16 mode_bits;
+ u16 flags;
+ u16 fifo_size;
+};
+
+/*
+ * functions for RSPI on legacy SH
+ */
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int ret;
+ int spbr;
- rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
- rspi_enable_irq(rspi, enable_bit);
- ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
- if (ret == 0 && !(rspi->spsr & wait_mask))
- return -ETIMEDOUT;
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
- return 0;
-}
+ /* Sets transfer bit rate */
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
+ 2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
-static void rspi_assert_ssl(struct rspi_data *rspi)
-{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
-}
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
-static void rspi_negate_ssl(struct rspi_data *rspi)
-{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
}
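The bit rate programmed above is spbr = DIV_ROUND_UP(clk, 2 * max_speed_hz) - 1, clamped to 0..255; assuming a hypothetical 48 MHz peripheral clock and a 1 MHz device, spbr = DIV_ROUND_UP(48000000, 2000000) - 1 = 23. The RZ variant below uses the same formula, while the QSPI variant omits the final - 1.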
-static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
+ 2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
- /* Sets parity, interrupt mask */
- rspi_write8(rspi, 0x00, RSPI_SPCR2);
-
/* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
- RSPI_SPCMD0);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
@@ -251,131 +321,209 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+/*
+ * functions for QSPI
+ */
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int remain = t->len;
- u8 *data;
+ int spbr;
- data = (u8 *)t->tx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
+ /* Sets transfer bit rate */
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
- remain--;
- }
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Data Length Setting */
+ if (access_size == 8)
+ rspi->spcmd |= SPCMD_SPB_8BIT;
+ else if (access_size == 16)
+ rspi->spcmd |= SPCMD_SPB_16BIT;
+ else
+ rspi->spcmd |= SPCMD_SPB_32BIT;
+
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
- /* Waiting for the last transmition */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+ /* Resets transfer data length */
+ rspi_write32(rspi, 0, QSPI_SPBMUL0);
+
+ /* Resets transmit and receive buffer */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Enables SPI function in master mode */
+ rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
return 0;
}
-static void rspi_dma_complete(void *arg)
+#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
+
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
- struct rspi_data *rspi = arg;
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
+}
- rspi->dma_callbacked = 1;
- wake_up_interruptible(&rspi->wait);
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
-static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
- struct dma_chan *chan,
- enum dma_transfer_direction dir)
+static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
+ u8 enable_bit)
{
- sg_init_table(sg, 1);
- sg_set_buf(sg, buf, len);
- sg_dma_len(sg) = len;
- return dma_map_sg(chan->device->dev, sg, 1, dir);
+ int ret;
+
+ rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (rspi->spsr & wait_mask)
+ return 0;
+
+ rspi_enable_irq(rspi, enable_bit);
+ ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
+ if (ret == 0 && !(rspi->spsr & wait_mask))
+ return -ETIMEDOUT;
+
+ return 0;
}
-static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
- enum dma_transfer_direction dir)
+static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
{
- dma_unmap_sg(chan->device->dev, sg, 1, dir);
+ return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
}
-static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
+static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
{
- u16 *dst = buf;
- const u8 *src = data;
+ return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
+}
- while (len) {
- *dst++ = (u16)(*src++);
- len--;
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
+{
+ int error = rspi_wait_for_tx_empty(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return error;
}
+ rspi_write_data(rspi, data);
+ return 0;
}
-static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
+static int rspi_data_in(struct rspi_data *rspi)
{
- u8 *dst = buf;
- const u16 *src = data;
+ int error;
+ u8 data;
- while (len) {
- *dst++ = (u8)*src++;
- len--;
+ error = rspi_wait_for_rx_full(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return error;
}
+ data = rspi_read_data(rspi);
+ return data;
}
-static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
-{
- struct scatterlist sg;
- void *buf = NULL;
- struct dma_async_tx_descriptor *desc;
- unsigned len;
- int ret = 0;
-
- if (rspi->dma_width_16bit) {
- /*
- * If DMAC bus width is 16-bit, the driver allocates a dummy
- * buffer. And, the driver converts original data into the
- * DMAC data as the following format:
- * original data: 1st byte, 2nd byte ...
- * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
- */
- len = t->len * 2;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rspi_memory_to_8bit(buf, t->tx_buf, t->len);
- } else {
- len = t->len;
- buf = (void *)t->tx_buf;
+static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
+ unsigned int n)
+{
+ while (n-- > 0) {
+ if (tx) {
+ int ret = rspi_data_out(rspi, *tx++);
+ if (ret < 0)
+ return ret;
+ }
+ if (rx) {
+ int ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx++ = ret;
+ }
}
- if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
- ret = -EFAULT;
- goto end_nomap;
+ return 0;
+}
+
+static void rspi_dma_complete(void *arg)
+{
+ struct rspi_data *rspi = arg;
+
+ rspi->dma_callbacked = 1;
+ wake_up_interruptible(&rspi->wait);
+}
+
+static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
+ struct sg_table *rx)
+{
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ u8 irq_mask = 0;
+ unsigned int other_irq = 0;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+ tx->sgl, tx->nents, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx)
+ return -EIO;
+
+ irq_mask |= SPCR_SPTIE;
}
- desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- ret = -EIO;
- goto end;
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
+ rx->sgl, rx->nents, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EIO;
+
+ irq_mask |= SPCR_SPRIE;
}
/*
- * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ if (tx)
+ disable_irq(other_irq = rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ disable_irq(rspi->rx_irq);
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
- rspi_enable_irq(rspi, SPCR_SPTIE);
+ rspi_enable_irq(rspi, irq_mask);
rspi->dma_callbacked = 0;
- desc->callback = rspi_dma_complete;
- desc->callback_param = rspi;
- dmaengine_submit(desc);
- dma_async_issue_pending(rspi->chan_tx);
+ if (rx) {
+ desc_rx->callback = rspi_dma_complete;
+ desc_rx->callback_param = rspi;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie))
+ return cookie;
+ dma_async_issue_pending(rspi->master->dma_rx);
+ }
+ if (tx) {
+ if (rx) {
+ /* No callback */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = rspi_dma_complete;
+ desc_tx->callback_param = rspi;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie))
+ return cookie;
+ dma_async_issue_pending(rspi->master->dma_tx);
+ }
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
@@ -383,270 +531,290 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = 0;
else if (!ret)
ret = -ETIMEDOUT;
- rspi_disable_irq(rspi, SPCR_SPTIE);
- enable_irq(rspi->irq);
+ rspi_disable_irq(rspi, irq_mask);
-end:
- rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
-end_nomap:
- if (rspi->dma_width_16bit)
- kfree(buf);
+ if (tx)
+ enable_irq(rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ enable_irq(rspi->rx_irq);
return ret;
}
-static void rspi_receive_init(struct rspi_data *rspi)
+static void rspi_receive_init(const struct rspi_data *rspi)
{
- unsigned char spsr;
+ u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read16(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
- RSPI_SPCR);
+ RSPI_SPSR);
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
-
rspi_receive_init(rspi);
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
+}
- data = (u8 *)t->rx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write16(rspi, 0x00, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 16 or 32-bit access only */
- *data = (u8)rspi_read16(rspi, RSPI_SPDR);
+static void qspi_receive_init(const struct rspi_data *rspi)
+{
+ u8 spsr;
- data++;
- remain--;
- }
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read_data(rspi); /* dummy read */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
+}
- return 0;
+static bool __rspi_can_dma(const struct rspi_data *rspi,
+ const struct spi_transfer *xfer)
+{
+ return xfer->len > rspi->ops->fifo_size;
}
-static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
-{
- struct scatterlist sg, sg_dummy;
- void *dummy = NULL, *rx_buf = NULL;
- struct dma_async_tx_descriptor *desc, *desc_dummy;
- unsigned len;
- int ret = 0;
-
- if (rspi->dma_width_16bit) {
- /*
- * If DMAC bus width is 16-bit, the driver allocates a dummy
- * buffer. And, finally the driver converts the DMAC data into
- * actual data as the following format:
- * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
- * actual data: 1st byte, 2nd byte ...
- */
- len = t->len * 2;
- rx_buf = kmalloc(len, GFP_KERNEL);
- if (!rx_buf)
- return -ENOMEM;
- } else {
- len = t->len;
- rx_buf = t->rx_buf;
- }
+static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
- /* prepare dummy transfer to generate SPI clocks */
- dummy = kzalloc(len, GFP_KERNEL);
- if (!dummy) {
- ret = -ENOMEM;
- goto end_nomap;
- }
- if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
- DMA_TO_DEVICE)) {
- ret = -EFAULT;
- goto end_nomap;
- }
- desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_dummy) {
- ret = -EIO;
- goto end_dummy_mapped;
- }
+ return __rspi_can_dma(rspi, xfer);
+}
- /* prepare receive transfer */
- if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
- DMA_FROM_DEVICE)) {
- ret = -EFAULT;
- goto end_dummy_mapped;
+static int rspi_common_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int ret;
- }
- desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- ret = -EIO;
- goto end;
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
+ return rspi_dma_transfer(rspi, &xfer->tx_sg,
+ xfer->rx_buf ? &xfer->rx_sg : NULL);
}
- rspi_receive_init(rspi);
+ ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
+ if (ret < 0)
+ return ret;
- /*
- * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
- * called. So, this driver disables the IRQ while DMA transfer.
- */
- disable_irq(rspi->irq);
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
- rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
- rspi->dma_callbacked = 0;
+ return 0;
+}
- desc->callback = rspi_dma_complete;
- desc->callback_param = rspi;
- dmaengine_submit(desc);
- dma_async_issue_pending(rspi->chan_rx);
+static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ u8 spcr;
- desc_dummy->callback = NULL; /* No callback */
- dmaengine_submit(desc_dummy);
- dma_async_issue_pending(rspi->chan_tx);
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (xfer->rx_buf) {
+ rspi_receive_init(rspi);
+ spcr &= ~SPCR_TXMD;
+ } else {
+ spcr |= SPCR_TXMD;
+ }
+ rspi_write8(rspi, spcr, RSPI_SPCR);
- ret = wait_event_interruptible_timeout(rspi->wait,
- rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
- ret = 0;
- else if (!ret)
- ret = -ETIMEDOUT;
- rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
+ return rspi_common_transfer(rspi, xfer);
+}
- enable_irq(rspi->irq);
+static int rspi_rz_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
-end:
- rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
-end_dummy_mapped:
- rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
-end_nomap:
- if (rspi->dma_width_16bit) {
- if (!ret)
- rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
- kfree(rx_buf);
- }
- kfree(dummy);
+ rspi_rz_receive_init(rspi);
- return ret;
+ return rspi_common_transfer(rspi, xfer);
}
-static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
{
- if (t->tx_buf && rspi->chan_tx)
- return 1;
- /* If the module receives data by DMAC, it also needs TX DMAC */
- if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
- return 1;
+ qspi_receive_init(rspi);
- return 0;
+ return rspi_common_transfer(rspi, xfer);
}
-static void rspi_work(struct work_struct *work)
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
- struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
- struct spi_message *mesg;
- struct spi_transfer *t;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&rspi->lock, flags);
- while (!list_empty(&rspi->queue)) {
- mesg = list_entry(rspi->queue.next, struct spi_message, queue);
- list_del_init(&mesg->queue);
- spin_unlock_irqrestore(&rspi->lock, flags);
-
- rspi_assert_ssl(rspi);
-
- list_for_each_entry(t, &mesg->transfers, transfer_list) {
- if (t->tx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_send_dma(rspi, t);
- else
- ret = rspi_send_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- if (t->rx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_receive_dma(rspi, t);
- else
- ret = rspi_receive_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- mesg->actual_length += t->len;
- }
- rspi_negate_ssl(rspi);
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
+ return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
- mesg->status = 0;
- mesg->complete(mesg->context);
+ ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
+ if (ret < 0)
+ return ret;
- spin_lock_irqsave(&rspi->lock, flags);
- }
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
- return;
+ return 0;
+}
-error:
- mesg->status = ret;
- mesg->complete(mesg->context);
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
+ return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+
+ return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
+}
+
+static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
}
static int rspi_setup(struct spi_device *spi)
{
struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
rspi->max_speed_hz = spi->max_speed_hz;
- rspi_set_config_register(rspi, 8);
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
+ set_config_register(rspi, 8);
return 0;
}
-static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- unsigned long flags;
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
- mesg->actual_length = 0;
- mesg->status = -EINPROGRESS;
+ return 0;
+}
- spin_lock_irqsave(&rspi->lock, flags);
- list_add_tail(&mesg->queue, &rspi->queue);
- schedule_work(&rspi->ws);
- spin_unlock_irqrestore(&rspi->lock, flags);
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
+
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
return 0;
}
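To make the sequencer loop above concrete: a message made of a single-mode command byte followed by a quad-mode read coalesces into two SPCMD entries, the first of length 1 and the second of the read length. A hedged sketch of how a caller might build such a message (the helper name, opcode and buffer are hypothetical, and the device is assumed to have SPI_RX_QUAD set in its mode):

#include <linux/spi/spi.h>

/* Hypothetical helper: one command byte, then a quad-wire read into buf. */
static int example_quad_read(struct spi_device *spi, u8 opcode,
			     void *buf, size_t len)
{
	struct spi_transfer t[2] = {
		{	/* command phase: ordinary single-wire write */
			.tx_buf = &opcode,
			.len = 1,
			.tx_nbits = SPI_NBITS_SINGLE,
		}, {	/* data phase: quad-wire read */
			.rx_buf = buf,
			.len = len,
			.rx_nbits = SPI_NBITS_QUAD,
		},
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);
	return spi_sync(spi, &m);
}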
-static void rspi_cleanup(struct spi_device *spi)
+static int rspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+ return 0;
}
-static irqreturn_t rspi_irq(int irq, void *_sr)
+static int rspi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct rspi_data *rspi = (struct rspi_data *)_sr;
- unsigned long spsr;
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
+}
+
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
irqreturn_t ret = IRQ_NONE;
- unsigned char disable_irq = 0;
+ u8 disable_irq = 0;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
@@ -663,86 +831,204 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
-static bool rspi_filter(struct dma_chan *chan, void *filter_param)
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
{
- chan->private = filter_param;
- return true;
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
}
-static void __devinit rspi_request_dma(struct rspi_data *rspi,
- struct platform_device *pdev)
+static struct dma_chan *rspi_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir,
+ unsigned int id,
+ dma_addr_t port_addr)
{
- struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ int ret;
- if (!rspi_pd)
- return;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
+ chan = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(unsigned long)id);
+ if (!chan) {
+ dev_warn(dev, "dma_request_channel failed\n");
+ return NULL;
+ }
- /* If the module receives data by DMAC, it also needs TX DMAC */
- if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
- rspi->chan_rx = dma_request_channel(mask, rspi_filter,
- &rspi->dma_rx);
- if (rspi->chan_rx)
- dev_info(&pdev->dev, "Use DMA when rx.\n");
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = id;
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV)
+ cfg.dst_addr = port_addr;
+ else
+ cfg.src_addr = port_addr;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
}
- if (rspi_pd->dma_tx_id) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
- rspi->chan_tx = dma_request_channel(mask, rspi_filter,
- &rspi->dma_tx);
- if (rspi->chan_tx)
- dev_info(&pdev->dev, "Use DMA when tx\n");
+
+ return chan;
+}
+
+static int rspi_request_dma(struct device *dev, struct spi_master *master,
+ const struct resource *res)
+{
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
+
+ if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
+ return 0; /* DMA is optional; not having it is not an error */
+
+ master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
+ rspi_pd->dma_rx_id,
+ res->start + RSPI_SPDR);
+ if (!master->dma_rx)
+ return -ENODEV;
+
+ master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
+ rspi_pd->dma_tx_id,
+ res->start + RSPI_SPDR);
+ if (!master->dma_tx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ return -ENODEV;
}
+
+ master->can_dma = rspi_can_dma;
+ dev_info(dev, "DMA available");
+ return 0;
}
-static void __devexit rspi_release_dma(struct rspi_data *rspi)
+static void rspi_release_dma(struct rspi_data *rspi)
{
- if (rspi->chan_tx)
- dma_release_channel(rspi->chan_tx);
- if (rspi->chan_rx)
- dma_release_channel(rspi->chan_rx);
+ if (rspi->master->dma_tx)
+ dma_release_channel(rspi->master->dma_tx);
+ if (rspi->master->dma_rx)
+ dma_release_channel(rspi->master->dma_rx);
}
-static int __devexit rspi_remove(struct platform_device *pdev)
+static int rspi_remove(struct platform_device *pdev)
{
- struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
- spi_unregister_master(rspi->master);
rspi_release_dma(rspi);
- free_irq(platform_get_irq(pdev, 0), rspi);
- clk_put(rspi->clk);
- iounmap(rspi->addr);
- spi_master_put(rspi->master);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
-static int __devinit rspi_probe(struct platform_device *pdev)
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+ .flags = SPI_MASTER_MUST_TX,
+ .fifo_size = 8,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+ .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .fifo_size = 8, /* 8 for TX, 32 for RX */
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+ .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .fifo_size = 32,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ master->num_chipselect = num_cs;
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
+ const char *base = dev_name(dev);
+ size_t len = strlen(base) + strlen(suffix) + 2;
+ char *name = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s:%s", base, suffix);
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
+static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct rspi_data *rspi;
- int ret, irq;
- char clk_name[16];
-
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- return -ENODEV;
- }
+ int ret;
+ const struct of_device_id *of_id;
+ const struct rspi_plat_data *rspi_pd;
+ const struct spi_ops *ops;
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (master == NULL) {
@@ -750,75 +1036,137 @@ static int __devinit rspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
- rspi = spi_master_get_devdata(master);
- dev_set_drvdata(&pdev->dev, rspi);
+ of_id = of_match_device(rspi_of_match, &pdev->dev);
+ if (of_id) {
+ ops = of_id->data;
+ ret = rspi_parse_dt(&pdev->dev, master);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
+ master->num_chipselect = 2; /* default */
+ }
+
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+ rspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, rspi);
+ rspi->ops = ops;
rspi->master = master;
- rspi->addr = ioremap(res->start, resource_size(res));
- if (rspi->addr == NULL) {
- dev_err(&pdev->dev, "ioremap error.\n");
- ret = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rspi->addr)) {
+ ret = PTR_ERR(rspi->addr);
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
- rspi->clk = clk_get(&pdev->dev, clk_name);
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
- goto error2;
+ goto error1;
}
- clk_enable(rspi->clk);
- INIT_LIST_HEAD(&rspi->queue);
- spin_lock_init(&rspi->lock);
- INIT_WORK(&rspi->ws, rspi_work);
+ pm_runtime_enable(&pdev->dev);
+
init_waitqueue_head(&rspi->wait);
- master->num_chipselect = 2;
master->bus_num = pdev->id;
master->setup = rspi_setup;
- master->transfer = rspi_transfer;
- master->cleanup = rspi_cleanup;
+ master->auto_runtime_pm = true;
+ master->transfer_one = ops->transfer_one;
+ master->prepare_message = rspi_prepare_message;
+ master->unprepare_message = rspi_unprepare_message;
+ master->mode_bits = ops->mode_bits;
+ master->flags = ops->flags;
+ master->dev.of_node = pdev->dev.of_node;
+
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ goto error2;
+ }
- ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
- goto error3;
+ goto error2;
}
- rspi->irq = irq;
- rspi_request_dma(rspi, pdev);
+ ret = rspi_request_dma(&pdev->dev, master, res);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "DMA not available, using PIO\n");
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error4;
+ goto error3;
}
dev_info(&pdev->dev, "probed\n");
return 0;
-error4:
- rspi_release_dma(rspi);
- free_irq(irq, rspi);
error3:
- clk_put(rspi->clk);
+ rspi_release_dma(rspi);
error2:
- iounmap(rspi->addr);
+ pm_runtime_disable(&pdev->dev);
error1:
spi_master_put(master);
return ret;
}
+static struct platform_device_id spi_driver_ids[] = {
+ { "rspi", (kernel_ulong_t)&rspi_ops },
+ { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
+ { "qspi", (kernel_ulong_t)&qspi_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
- .remove = __devexit_p(rspi_remove),
+ .remove = rspi_remove,
+ .id_table = spi_driver_ids,
.driver = {
- .name = "rspi",
+ .name = "renesas_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rspi_of_match),
},
};
module_platform_driver(rspi_driver);
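The bulk of the rspi changes above move the driver from its private queue/work pair onto the SPI core's message pump. As a rough orientation only (a simplified sketch of the core's calling order around these hooks, not something this patch adds), the callbacks registered in rspi_probe() are invoked per message roughly like this:

/*
 * Simplified per-message flow in the SPI core (sketch):
 *
 *	master->prepare_message(master, msg);              e.g. rspi_prepare_message()
 *	list_for_each_entry(xfer, &msg->transfers, transfer_list)
 *		master->transfer_one(master, msg->spi, xfer);  e.g. ops->transfer_one
 *	master->unprepare_message(master, msg);            e.g. rspi_unprepare_message()
 */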
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index a2a080b7f42..e713737d784 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -9,9 +9,7 @@
*
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -29,7 +27,6 @@
#include <plat/regs-spi.h>
-#include <plat/fiq.h>
#include <asm/fiq.h>
#include "spi-s3c24xx-fiq.h"
@@ -78,14 +75,12 @@ struct s3c24xx_spi {
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
struct spi_device *curdev;
struct device *dev;
struct s3c2410_spi_info *pdata;
};
-
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -126,25 +121,15 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
{
struct s3c24xx_spi *hw = to_hw(spi);
struct s3c24xx_spi_devstate *cs = spi->controller_state;
- unsigned int bpw;
unsigned int hz;
unsigned int div;
unsigned long clk;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
hz = t ? t->speed_hz : spi->max_speed_hz;
- if (!bpw)
- bpw = 8;
-
if (!hz)
hz = spi->max_speed_hz;
- if (bpw != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
- return -EINVAL;
- }
-
if (spi->mode != cs->mode) {
u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
@@ -197,11 +182,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
/* allocate settings on the first call */
if (!cs) {
- cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL);
- if (!cs) {
- dev_err(&spi->dev, "no memory for controller state\n");
+ cs = devm_kzalloc(&spi->dev,
+ sizeof(struct s3c24xx_spi_devstate),
+ GFP_KERNEL);
+ if (!cs)
return -ENOMEM;
- }
cs->spcon = SPCON_DEFAULT;
cs->hz = -1;
@@ -223,11 +208,6 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
return 0;
}
-static void s3c24xx_spi_cleanup(struct spi_device *spi)
-{
- kfree(spi->controller_state);
-}
-
static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
{
return hw->tx ? hw->tx[count] : 0;
@@ -280,7 +260,7 @@ static inline u32 ack_bit(unsigned int irq)
* so the caller does not need to do anything more than start the transfer
* as normal, since the IRQ will have been re-routed to the FIQ handler.
*/
-void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
{
struct pt_regs regs;
enum spi_fiq_mode mode;
@@ -506,7 +486,7 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
}
}
-static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
+static int s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c2410_spi_info *pdata;
struct s3c24xx_spi *hw;
@@ -517,15 +497,14 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
memset(hw, 0, sizeof(struct s3c24xx_spi));
- hw->master = spi_master_get(master);
- hw->pdata = pdata = pdev->dev.platform_data;
+ hw->master = master;
+ hw->pdata = pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (pdata == NULL) {
@@ -548,6 +527,7 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
/* setup the state for the bitbang driver */
@@ -557,53 +537,36 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
hw->master->setup = s3c24xx_spi_setup;
- hw->master->cleanup = s3c24xx_spi_cleanup;
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
/* find and map our resources */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
- goto err_no_iores;
- }
-
- hw->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
- goto err_no_iores;
- }
-
- hw->regs = ioremap(res->start, resource_size(res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_no_iomap;
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
+ goto err_no_pdata;
}
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_no_irq;
+ goto err_no_pdata;
}
- err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_no_irq;
+ goto err_no_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_no_clk;
+ goto err_no_pdata;
}
/* setup any gpio we can */
@@ -615,7 +578,8 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
- err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
+ err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
+ dev_name(&pdev->dev));
if (err) {
dev_err(&pdev->dev, "Failed to get gpio for cs\n");
goto err_register;
@@ -639,50 +603,19 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
return 0;
err_register:
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(pdata->pin_cs);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- err_no_clk:
- free_irq(hw->irq, hw);
-
- err_no_irq:
- iounmap(hw->regs);
-
- err_no_iomap:
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
- err_no_iores:
err_no_pdata:
spi_master_put(hw->master);
-
- err_nomem:
return err;
}
-static int __devexit s3c24xx_spi_remove(struct platform_device *dev)
+static int s3c24xx_spi_remove(struct platform_device *dev)
{
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- free_irq(hw->irq, hw);
- iounmap(hw->regs);
-
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(hw->pdata->pin_cs);
-
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
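The s3c24xx changes above are largely a conversion to managed (devm_*) resources, which is why the explicit free_irq()/iounmap()/clk_put() unwinding disappears from both the probe error path and remove(). One general devm property worth keeping in mind (not something the patch itself states): managed resources are released only after remove() returns, in reverse order of allocation, so work that still touches the hardware, such as spi_bitbang_stop() and clk_disable(), stays in remove(). A comment-only sketch of the resulting lifetime, with illustrative ordering:

/*
 * devm lifetime sketch (illustrative):
 *
 *	probe():  devm_ioremap_resource(), devm_request_irq(), devm_clk_get()
 *	...device in use...
 *	remove(): spi_bitbang_stop(), clk_disable()   <- registers still mapped here
 *	afterwards the devm core releases the clock, IRQ and mapping
 *	in reverse order of allocation
 */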
@@ -692,7 +625,12 @@ static int __devexit s3c24xx_spi_remove(struct platform_device *dev)
static int s3c24xx_spi_suspend(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -703,10 +641,10 @@ static int s3c24xx_spi_suspend(struct device *dev)
static int s3c24xx_spi_resume(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
s3c24xx_spi_initialsetup(hw);
- return 0;
+ return spi_master_resume(hw->master);
}
static const struct dev_pm_ops s3c24xx_spi_pmops = {
@@ -722,7 +660,7 @@ static const struct dev_pm_ops s3c24xx_spi_pmops = {
MODULE_ALIAS("platform:s3c2410-spi");
static struct platform_driver s3c24xx_spi_driver = {
.probe = s3c24xx_spi_probe,
- .remove = __devexit_p(s3c24xx_spi_remove),
+ .remove = s3c24xx_spi_remove,
.driver = {
.name = "s3c2410-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 6e7a805d324..75a56968b14 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -19,11 +19,11 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -31,10 +31,10 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <mach/dma.h>
#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 3
+#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
/* Registers and bit-fields */
@@ -62,7 +62,7 @@
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
-#define S3C64XX_SPI_PSR_MASK 0xff
+#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
@@ -126,15 +126,15 @@
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
struct s3c64xx_spi_dma_data {
- unsigned ch;
+ struct dma_chan *ch;
enum dma_transfer_direction direction;
- enum dma_ch dmach;
- struct property *dma_prop;
+ unsigned int dmach;
};
/**
@@ -155,6 +155,7 @@ struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
int tx_st_done;
+ int quirks;
bool high_speed;
bool clk_from_cmu;
};
@@ -166,7 +167,6 @@ struct s3c64xx_spi_port_config {
* @master: Pointer to the SPI Protocol master.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
- * @queue: To log SPI xfer requests.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
* @rx_dmach: Controller's DMA channel for Rx.
@@ -187,7 +187,6 @@ struct s3c64xx_spi_driver_data {
struct spi_master *master;
struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
- struct list_head queue;
spinlock_t lock;
unsigned long sfr_start;
struct completion xfer_completion;
@@ -196,14 +195,9 @@ struct s3c64xx_spi_driver_data {
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
- struct samsung_dma_ops *ops;
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
- unsigned long gpios[4];
-};
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
- .name = "samsung-spi-dma",
+ bool cs_gpio;
};
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
@@ -215,6 +209,10 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -248,10 +246,6 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
-
- val = readl(regs + S3C64XX_SPI_CH_CFG);
- val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
- writel(val, regs + S3C64XX_SPI_CH_CFG);
}
static void s3c64xx_spi_dmacb(void *data)
@@ -283,54 +277,111 @@ static void s3c64xx_spi_dmacb(void *data)
}
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep info;
- struct samsung_dma_config config;
+ struct dma_slave_config config;
+ struct dma_async_tx_descriptor *desc;
+
+ memset(&config, 0, sizeof(config));
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
- config.direction = sdd->rx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config(sdd->rx_dma.ch, &config);
+ config.direction = dma->direction;
+ config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.src_addr_width = sdd->cur_bpw / 8;
+ config.src_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
- config.direction = sdd->tx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config(sdd->tx_dma.ch, &config);
+ config.direction = dma->direction;
+ config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.dst_addr_width = sdd->cur_bpw / 8;
+ config.dst_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
}
- info.cap = DMA_SLAVE;
- info.len = len;
- info.fp = s3c64xx_spi_dmacb;
- info.fp_param = dma;
- info.direction = dma->direction;
- info.buf = buf;
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
+
+ desc->callback = s3c64xx_spi_dmacb;
+ desc->callback_param = dma;
- sdd->ops->prepare(dma->ch, &info);
- sdd->ops->trigger(dma->ch);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->ch);
}
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
- struct samsung_dma_req req;
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+ dma_filter_fn filter = sdd->cntrlr_info->filter;
+ struct device *dev = &sdd->pdev->dev;
+ dma_cap_mask_t mask;
+ int ret;
+
+ if (!is_polling(sdd)) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)sdd->rx_dma.dmach, dev, "rx");
+ if (!sdd->rx_dma.ch) {
+ dev_err(dev, "Failed to get RX DMA channel\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ spi->dma_rx = sdd->rx_dma.ch;
+
+ sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)sdd->tx_dma.dmach, dev, "tx");
+ if (!sdd->tx_dma.ch) {
+ dev_err(dev, "Failed to get TX DMA channel\n");
+ ret = -EBUSY;
+ goto out_rx;
+ }
+ spi->dma_tx = sdd->tx_dma.ch;
+ }
- sdd->ops = samsung_dma_get_ops();
+ ret = pm_runtime_get_sync(&sdd->pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable device: %d\n", ret);
+ goto out_tx;
+ }
- req.cap = DMA_SLAVE;
- req.client = &s3c64xx_spi_dma_client;
+ return 0;
+
+out_tx:
+ dma_release_channel(sdd->tx_dma.ch);
+out_rx:
+ dma_release_channel(sdd->rx_dma.ch);
+out:
+ return ret;
+}
- req.dt_dmach_prop = sdd->rx_dma.dma_prop;
- sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req);
- req.dt_dmach_prop = sdd->tx_dma.dma_prop;
- sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ /* Free DMA channels */
+ if (!is_polling(sdd)) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ }
+
+ pm_runtime_put(&sdd->pdev->dev);
+ return 0;
+}
+
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- return 1;
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
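The can_dma test above compares the transfer length against a threshold derived from the FIFO level mask. A worked example with purely illustrative numbers (the mask value is hypothetical, not tied to a particular SoC):

/*
 * Illustrative numbers only:
 *	FIFO_LVL_MASK(sdd) = 0x7f
 *	(0x7f >> 1) + 1    = 0x3f + 1 = 64
 * so transfers longer than 64 bytes are handed to the DMA engine,
 * while anything that fits in 64 bytes is done by polled PIO.
 */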
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -364,7 +415,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -396,7 +447,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
@@ -404,109 +455,132 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
+static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+ int timeout_ms)
{
- struct s3c64xx_spi_csinfo *cs;
+ void __iomem *regs = sdd->regs;
+ unsigned long val = 1;
+ u32 status;
- if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
- if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
- /* Deselect the last toggled device */
- cs = sdd->tgl_spi->controller_data;
- gpio_set_value(cs->line,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
- sdd->tgl_spi = NULL;
- }
+ /* max fifo depth available */
+ u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+ if (timeout_ms)
+ val = msecs_to_loops(timeout_ms);
- cs = spi->controller_data;
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+ /* return the actual received data length */
+ return RX_FIFO_LVL(status, sdd);
}
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
+ u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
- if (dma_mode) {
- val = msecs_to_jiffies(ms) + 10;
- val = wait_for_completion_timeout(&sdd->xfer_completion, val);
- } else {
- u32 status;
- val = msecs_to_loops(ms);
- do {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous transfer completed within the timeout, proceed;
+ * otherwise return -EIO.
+ * DMA Tx returns as soon as the data has been written to the FIFO,
+ * without waiting for the actual transmission on the bus to finish.
+ * DMA Rx returns only after the DMA has read the data out of the
+ * FIFO, which requires the bus transmission to complete, so no extra
+ * check is needed when the transfer involves Rx (with or without Tx).
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+ }
+
}
+ /* If we timed out while checking the rx/tx status, return an error */
if (!val)
return -EIO;
- if (dma_mode) {
- u32 status;
-
- /*
- * DmaTx returns after simply writing data in the FIFO,
- * w/o waiting for real transmission on the bus to finish.
- * DmaRx returns only after Dma read data from FIFO which
- * needs bus transmission to finish, so we don't worry if
- * Xfer involved Rx(with or without Tx).
- */
- if (xfer->rx_buf == NULL) {
- val = msecs_to_loops(10);
- status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sdd)
- || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
- && --val) {
- cpu_relax();
- status = readl(regs + S3C64XX_SPI_STATUS);
- }
+ return 0;
+}
- if (!val)
- return -EIO;
- }
- } else {
- /* If it was only Tx */
- if (xfer->rx_buf == NULL) {
- sdd->state &= ~TXBUSY;
- return 0;
- }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
+
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
+ }
+
+ /*
+ * If the receive length is bigger than the controller FIFO size,
+ * work out how many times the FIFO has to be drained:
+ * loops = length / max FIFO size (derived from the FIFO mask).
+ * For any length smaller than the FIFO size, the code below is
+ * executed at least once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
- xfer->rx_buf, xfer->len / 4);
+ buf, cpy_len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
- xfer->rx_buf, xfer->len / 2);
+ buf, cpy_len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
- xfer->rx_buf, xfer->len);
+ buf, cpy_len);
break;
}
- sdd->state &= ~RXBUSY;
- }
-
- return 0;
-}
-
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- struct s3c64xx_spi_csinfo *cs = spi->controller_data;
- if (sdd->tgl_spi == spi)
- sdd->tgl_spi = NULL;
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
- gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ return 0;
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -516,7 +590,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* Disable Clock */
if (sdd->port_conf->clk_from_cmu) {
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
@@ -564,7 +638,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* There is half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
/* Enable Clock */
- clk_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -582,91 +656,12 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (msg->is_dma_mapped)
- return 0;
-
- /* First mark all xfer unmapped */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- }
-
- /* Map until end or first fail */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, xfer->tx_dma)) {
- dev_err(dev, "dma_map_single Tx failed\n");
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
-
- if (xfer->rx_buf != NULL) {
- xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
- xfer->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, xfer->rx_dma)) {
- dev_err(dev, "dma_map_single Rx failed\n");
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (msg->is_dma_mapped)
- return;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->rx_buf != NULL
- && xfer->rx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->rx_dma,
- xfer->len, DMA_FROM_DEVICE);
-
- if (xfer->tx_buf != NULL
- && xfer->tx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- }
-}
-
-static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
- struct spi_transfer *xfer;
- int status = 0, cs_toggle = 0;
- u32 speed;
- u8 bpw;
/* If Master's(controller) state differs from that needed by Slave */
if (sdd->cur_speed != spi->max_speed_hz
@@ -678,157 +673,91 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
s3c64xx_spi_config(sdd);
}
- /* Map all the transfers if needed */
- if (s3c64xx_spi_map_mssg(sdd, msg)) {
- dev_err(&spi->dev,
- "Xfer: Unable to map message buffers!\n");
- status = -ENOMEM;
- goto out;
- }
-
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- unsigned long flags;
- int use_dma;
-
- INIT_COMPLETION(sdd->xfer_completion);
-
- /* Only BPW and Speed may change across transfers */
- bpw = xfer->bits_per_word ? : spi->bits_per_word;
- speed = xfer->speed_hz ? : spi->max_speed_hz;
-
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- status = -EIO;
- goto out;
- }
-
- if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
- sdd->cur_bpw = bpw;
- sdd->cur_speed = speed;
- s3c64xx_spi_config(sdd);
- }
-
- /* Polling method for xfers not bigger than FIFO capacity */
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- use_dma = 0;
- else
- use_dma = 1;
-
- spin_lock_irqsave(&sdd->lock, flags);
+ return 0;
+}
- /* Pending only which is to be done */
- sdd->state &= ~RXBUSY;
- sdd->state &= ~TXBUSY;
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int status;
+ u32 speed;
+ u8 bpw;
+ unsigned long flags;
+ int use_dma;
- enable_datapath(sdd, spi, xfer, use_dma);
+ reinit_completion(&sdd->xfer_completion);
- /* Slave Select */
- enable_cs(sdd, spi);
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
- /* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
- spin_unlock_irqrestore(&sdd->lock, flags);
+ /* Polling method for xfers not bigger than FIFO capacity */
+ use_dma = 0;
+ if (!is_polling(sdd) &&
+ (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+ (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
+ use_dma = 1;
- status = wait_for_xfer(sdd, xfer, use_dma);
+ spin_lock_irqsave(&sdd->lock, flags);
- /* Quiese the signals */
- writel(S3C64XX_SPI_SLAVE_SIG_INACT,
- sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ /* Pending only which is to be done */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
- if (status) {
- dev_err(&spi->dev, "I/O Error: "
- "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
- xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
- (sdd->state & RXBUSY) ? 'f' : 'p',
- (sdd->state & TXBUSY) ? 'f' : 'p',
- xfer->len);
+ enable_datapath(sdd, spi, xfer, use_dma);
- if (use_dma) {
- if (xfer->tx_buf != NULL
- && (sdd->state & TXBUSY))
- sdd->ops->stop(sdd->tx_dma.ch);
- if (xfer->rx_buf != NULL
- && (sdd->state & RXBUSY))
- sdd->ops->stop(sdd->rx_dma.ch);
- }
+ /* Start the signals */
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- goto out;
- }
+ spin_unlock_irqrestore(&sdd->lock, flags);
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (xfer->cs_change) {
- /* Hint that the next mssg is gonna be
- for the same device */
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers))
- cs_toggle = 1;
- else
- disable_cs(sdd, spi);
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ dmaengine_terminate_all(sdd->tx_dma.ch);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ dmaengine_terminate_all(sdd->rx_dma.ch);
}
-
- msg->actual_length += xfer->len;
-
+ } else {
flush_fifo(sdd);
}
-out:
- if (!cs_toggle || status)
- disable_cs(sdd, spi);
- else
- sdd->tgl_spi = spi;
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
-
- msg->status = status;
-
- spi_finalize_current_message(master);
-
- return 0;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Acquire DMA channels */
- while (!acquire_dma(sdd))
- msleep(10);
-
- pm_runtime_get_sync(&sdd->pdev->dev);
-
- return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
- sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
-
- pm_runtime_put(&sdd->pdev->dev);
-
- return 0;
+ return status;
}
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
- struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
struct device_node *slave_np, *data_np = NULL;
+ struct s3c64xx_spi_driver_data *sdd;
u32 fb_delay = 0;
+ sdd = spi_master_get_devdata(spi->master);
slave_np = spi->dev.of_node;
if (!slave_np) {
dev_err(&spi->dev, "device node not found\n");
@@ -843,16 +772,16 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs) {
- dev_err(&spi->dev, "could not allocate memory for controller"
- " data\n");
of_node_put(data_np);
return ERR_PTR(-ENOMEM);
}
- cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+ /* The CS line is asserted/deasserted by the gpio pin */
+ if (sdd->cs_gpio)
+ cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+
if (!gpio_is_valid(cs->line)) {
- dev_err(&spi->dev, "chip select gpio is not specified or "
- "invalid\n");
+ dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
kfree(cs);
of_node_put(data_np);
return ERR_PTR(-EINVAL);
@@ -875,13 +804,11 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
- struct spi_message *msg;
- unsigned long flags;
int err;
sdd = spi_master_get_devdata(spi->master);
if (!cs && spi->dev.of_node) {
- cs = s3c64xx_get_slave_ctrldata(sdd, spi);
+ cs = s3c64xx_get_slave_ctrldata(spi);
spi->controller_data = cs;
}
@@ -891,43 +818,25 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
if (!spi_get_ctldata(spi)) {
- err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (err) {
- dev_err(&spi->dev,
- "Failed to get /CS gpio [%d]: %d\n",
- cs->line, err);
- goto err_gpio_req;
+ /* Request gpio only if cs line is asserted by gpio pins */
+ if (sdd->cs_gpio) {
+ err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (err) {
+ dev_err(&spi->dev,
+ "Failed to get /CS gpio [%d]: %d\n",
+ cs->line, err);
+ goto err_gpio_req;
+ }
+
+ spi->cs_gpio = cs->line;
}
+
spi_set_ctldata(spi, cs);
}
sci = sdd->cntrlr_info;
- spin_lock_irqsave(&sdd->lock, flags);
-
- list_for_each_entry(msg, &sdd->queue, queue) {
- /* Is some mssg is already queued for this device */
- if (msg->spi == spi) {
- dev_err(&spi->dev,
- "setup: attempt while mssg in queue!\n");
- spin_unlock_irqrestore(&sdd->lock, flags);
- err = -EBUSY;
- goto err_msgq;
- }
- }
-
- spin_unlock_irqrestore(&sdd->lock, flags);
-
- if (spi->bits_per_word != 8
- && spi->bits_per_word != 16
- && spi->bits_per_word != 32) {
- dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
- spi->bits_per_word);
- err = -EINVAL;
- goto setup_exit;
- }
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
@@ -959,20 +868,22 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
+ dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
+ spi->max_speed_hz);
err = -EINVAL;
goto setup_exit;
}
}
pm_runtime_put(&sdd->pdev->dev);
- disable_cs(sdd, spi);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
return 0;
setup_exit:
+ pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
- disable_cs(sdd, spi);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-err_msgq:
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
@@ -986,9 +897,11 @@ err_gpio_req:
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+ struct s3c64xx_spi_driver_data *sdd;
- if (cs) {
- gpio_free(cs->line);
+ sdd = spi_master_get_devdata(spi->master);
+ if (spi->cs_gpio) {
+ gpio_free(spi->cs_gpio);
if (spi->dev.of_node)
kfree(cs);
}
@@ -999,25 +912,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
struct spi_master *spi = sdd->master;
- unsigned int val;
-
- val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
-
- val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
- S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
- S3C64XX_SPI_PND_TX_OVERRUN_CLR |
- S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ unsigned int val, clr = 0;
- writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+ val = readl(sdd->regs + S3C64XX_SPI_STATUS);
- if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
+ if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
+ clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
dev_err(&spi->dev, "RX overrun\n");
- if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
+ }
+ if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
dev_err(&spi->dev, "RX underrun\n");
- if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
+ }
+ if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
dev_err(&spi->dev, "TX overrun\n");
- if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
+ }
+ if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
dev_err(&spi->dev, "TX underrun\n");
+ }
+
+ /* Clear the pending irq by setting and then clearing it */
+ writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
return IRQ_HANDLED;
}
@@ -1041,9 +959,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
writel(0, regs + S3C64XX_SPI_MODE_CFG);
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
- /* Clear any irq pending bits */
- writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
- regs + S3C64XX_SPI_PENDING_CLR);
+ /* Clear any pending irq bits: the clear bits must be set and then cleared */
+ val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
+ S3C64XX_SPI_PND_TX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ writel(val, regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
@@ -1056,108 +978,25 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
flush_fifo(sdd);
}
-static int __devinit s3c64xx_spi_get_dmares(
- struct s3c64xx_spi_driver_data *sdd, bool tx)
-{
- struct platform_device *pdev = sdd->pdev;
- struct s3c64xx_spi_dma_data *dma_data;
- struct property *prop;
- struct resource *res;
- char prop_name[15], *chan_str;
-
- if (tx) {
- dma_data = &sdd->tx_dma;
- dma_data->direction = DMA_MEM_TO_DEV;
- chan_str = "tx";
- } else {
- dma_data = &sdd->rx_dma;
- dma_data->direction = DMA_DEV_TO_MEM;
- chan_str = "rx";
- }
-
- if (!sdd->pdev->dev.of_node) {
- res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get SPI-%s dma "
- "resource\n", chan_str);
- return -ENXIO;
- }
- dma_data->dmach = res->start;
- return 0;
- }
-
- sprintf(prop_name, "%s-dma-channel", chan_str);
- prop = of_find_property(pdev->dev.of_node, prop_name, NULL);
- if (!prop) {
- dev_err(&pdev->dev, "%s dma channel property not specified\n",
- chan_str);
- return -ENXIO;
- }
-
- dma_data->dmach = DMACH_DT_PROP;
- dma_data->dma_prop = prop;
- return 0;
-}
-
#ifdef CONFIG_OF
-static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
-{
- struct device *dev = &sdd->pdev->dev;
- int idx, gpio, ret;
-
- /* find gpios for mosi, miso and clock lines */
- for (idx = 0; idx < 3; idx++) {
- gpio = of_get_gpio(dev->of_node, idx);
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
- goto free_gpio;
- }
-
- ret = gpio_request(gpio, "spi-bus");
- if (ret) {
- dev_err(dev, "gpio [%d] request failed: %d\n",
- gpio, ret);
- goto free_gpio;
- }
- }
- return 0;
-
-free_gpio:
- while (--idx >= 0)
- gpio_free(sdd->gpios[idx]);
- return -EINVAL;
-}
-
-static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
-{
- unsigned int idx;
- for (idx = 0; idx < 3; idx++)
- gpio_free(sdd->gpios[idx]);
-}
-
-static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
- struct device *dev)
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
u32 temp;
sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
- if (!sci) {
- dev_err(dev, "memory allocation for spi_info failed\n");
+ if (!sci)
return ERR_PTR(-ENOMEM);
- }
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
- dev_warn(dev, "spi bus clock parent not specified, using "
- "clock at index 0 as parent\n");
+ dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
- dev_warn(dev, "number of chip select lines not specified, "
- "assuming 1 chip select line\n");
+ dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
@@ -1168,16 +1007,7 @@ static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
- return dev->platform_data;
-}
-
-static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
-{
- return -EINVAL;
-}
-
-static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
-{
+ return dev_get_platdata(dev);
}
#endif
@@ -1197,11 +1027,12 @@ static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
platform_get_device_id(pdev)->driver_data;
}
-static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
+ struct resource *res;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
+ struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
struct spi_master *master;
int ret, irq;
char clk_name[16];
@@ -1244,11 +1075,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
+ sdd->cs_gpio = true;
if (pdev->dev.of_node) {
+ if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
+ sdd->cs_gpio = false;
+
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id, "
- "errno %d\n", ret);
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
+ ret);
goto err0;
}
sdd->port_id = ret;
@@ -1258,69 +1093,82 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cur_bpw = 8;
- ret = s3c64xx_spi_get_dmares(sdd, true);
- if (ret)
- goto err0;
+ if (!sdd->pdev->dev.of_node) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
+ sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+ } else
+ sdd->tx_dma.dmach = res->start;
- ret = s3c64xx_spi_get_dmares(sdd, false);
- if (ret)
- goto err0;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
+ sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+ } else
+ sdd->rx_dma.dmach = res->start;
+ }
+
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
+ sdd->rx_dma.direction = DMA_DEV_TO_MEM;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = sdd->port_id;
master->setup = s3c64xx_spi_setup;
master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
- master->transfer_one_message = s3c64xx_spi_transfer_one_message;
+ master->prepare_message = s3c64xx_spi_prepare_message;
+ master->transfer_one = s3c64xx_spi_transfer_one;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
- sdd->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (sdd->regs == NULL) {
- dev_err(&pdev->dev, "Unable to remap IO\n");
- ret = -ENXIO;
- goto err1;
+ sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sdd->regs)) {
+ ret = PTR_ERR(sdd->regs);
+ goto err0;
}
- if (!sci->cfg_gpio && pdev->dev.of_node) {
- if (s3c64xx_spi_parse_dt_gpio(sdd))
- return -EBUSY;
- } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
+ if (sci->cfg_gpio && sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
- goto err2;
+ goto err0;
}
/* Setup clocks */
- sdd->clk = clk_get(&pdev->dev, "spi");
+ sdd->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
- goto err3;
+ goto err0;
}
- if (clk_enable(sdd->clk)) {
+ if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
- goto err4;
+ goto err0;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
- sdd->src_clk = clk_get(&pdev->dev, clk_name);
+ sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
- goto err5;
+ goto err2;
}
- if (clk_enable(sdd->src_clk)) {
+ if (clk_prepare_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
- goto err6;
+ goto err2;
}
/* Setup Default Mode */
@@ -1328,53 +1176,41 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
- INIT_LIST_HEAD(&sdd->queue);
- ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
+ ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
+ "spi-s3c64xx", sdd);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
- goto err7;
+ goto err3;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
- if (spi_register_master(master)) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
- ret = -EBUSY;
- goto err8;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
+ goto err3;
}
- dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
- "with %d Slaves attached\n",
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
- mem_res->end, mem_res->start,
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
- pm_runtime_enable(&pdev->dev);
-
return 0;
-err8:
- free_irq(irq, sdd);
-err7:
- clk_disable(sdd->src_clk);
-err6:
- clk_put(sdd->src_clk);
-err5:
- clk_disable(sdd->clk);
-err4:
- clk_put(sdd->clk);
err3:
- if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
+ clk_disable_unprepare(sdd->src_clk);
err2:
-err1:
+ clk_disable_unprepare(sdd->clk);
err0:
- platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return ret;
@@ -1387,41 +1223,29 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
- free_irq(platform_get_irq(pdev, 0), sdd);
-
- clk_disable(sdd->src_clk);
- clk_put(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
- clk_disable(sdd->clk);
- clk_put(sdd->clk);
-
- if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
-
- platform_set_drvdata(pdev, NULL);
- spi_master_put(master);
+ clk_disable_unprepare(sdd->clk);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- spi_master_suspend(master);
-
- /* Disable the clock */
- clk_disable(sdd->src_clk);
- clk_disable(sdd->clk);
+ int ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
- if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ }
sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1434,22 +1258,19 @@ static int s3c64xx_spi_resume(struct device *dev)
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- if (!sci->cfg_gpio && dev->of_node)
- s3c64xx_spi_parse_dt_gpio(sdd);
- else
+ if (sci->cfg_gpio)
sci->cfg_gpio();
- /* Enable the clock */
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ if (!pm_runtime_suspended(dev)) {
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
+ }
s3c64xx_spi_hwinit(sdd, sdd->port_id);
- spi_master_resume(master);
-
- return 0;
+ return spi_master_resume(master);
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int s3c64xx_spi_runtime_suspend(struct device *dev)
@@ -1457,8 +1278,8 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- clk_disable(sdd->clk);
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
return 0;
}
@@ -1467,9 +1288,17 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret != 0)
+ return ret;
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret != 0) {
+ clk_disable_unprepare(sdd->src_clk);
+ return ret;
+ }
return 0;
}
@@ -1522,6 +1351,15 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.clk_from_cmu = true,
};
+static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_POLL,
+};
+
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1545,15 +1383,28 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{ },
};
-#ifdef CONFIG_OF
static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,s3c2443-spi",
+ .data = (void *)&s3c2443_spi_port_config,
+ },
+ { .compatible = "samsung,s3c6410-spi",
+ .data = (void *)&s3c6410_spi_port_config,
+ },
+ { .compatible = "samsung,s5pc100-spi",
+ .data = (void *)&s5pc100_spi_port_config,
+ },
+ { .compatible = "samsung,s5pv210-spi",
+ .data = (void *)&s5pv210_spi_port_config,
+ },
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
+ { .compatible = "samsung,exynos5440-spi",
+ .data = (void *)&exynos5440_spi_port_config,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
-#endif /* CONFIG_OF */
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
@@ -1562,22 +1413,13 @@ static struct platform_driver s3c64xx_spi_driver = {
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
+ .probe = s3c64xx_spi_probe,
.remove = s3c64xx_spi_remove,
.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
-static int __init s3c64xx_spi_init(void)
-{
- return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
-}
-subsys_initcall(s3c64xx_spi_init);
-
-static void __exit s3c64xx_spi_exit(void)
-{
- platform_driver_unregister(&s3c64xx_spi_driver);
-}
-module_exit(s3c64xx_spi_exit);
+module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 9eda21d739c..237f2e7a717 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -183,24 +183,9 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- int bpw;
- uint32_t hz;
-
if (t && t->len + tlen > SC18IS602_BUFSIZ)
return -EINVAL;
- bpw = spi->bits_per_word;
- if (t && t->bits_per_word)
- bpw = t->bits_per_word;
- if (bpw != 8)
- return -EINVAL;
-
- hz = spi->max_speed_hz;
- if (t && t->speed_hz)
- hz = t->speed_hz;
- if (hz == 0)
- return -EINVAL;
-
return 0;
}
@@ -212,22 +197,15 @@ static int sc18is602_transfer_one(struct spi_master *master,
struct spi_transfer *t;
int status = 0;
- /* SC18IS602 does not support CS2 */
- if (hw->id == sc18is602 && spi->chip_select == 2) {
- status = -ENXIO;
- goto error;
- }
-
hw->tlen = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 hz = t->speed_hz ? : spi->max_speed_hz;
bool do_transfer;
status = sc18is602_check_transfer(spi, t, hw->tlen);
if (status < 0)
break;
- status = sc18is602_setup_transfer(hw, hz, spi->mode);
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
if (status < 0)
break;
@@ -245,7 +223,6 @@ static int sc18is602_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
}
-error:
m->status = status;
spi_finalize_current_message(master);
@@ -254,13 +231,13 @@ error:
static int sc18is602_setup(struct spi_device *spi)
{
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
- if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
- return -EINVAL;
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
- return sc18is602_check_transfer(spi, NULL, 0);
+ return 0;
}
static int sc18is602_probe(struct i2c_client *client,
@@ -315,11 +292,14 @@ static int sc18is602_probe(struct i2c_client *client,
}
master->bus_num = client->adapter->nr;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
master->dev.of_node = np;
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
- error = spi_register_master(master);
+ error = devm_spi_register_master(dev, master);
if (error)
goto error_reg;
@@ -330,16 +310,6 @@ error_reg:
return error;
}
-static int sc18is602_remove(struct i2c_client *client)
-{
- struct sc18is602 *hw = i2c_get_clientdata(client);
- struct spi_master *master = hw->master;
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id sc18is602_id[] = {
{ "sc18is602", sc18is602 },
{ "sc18is602b", sc18is602b },
@@ -353,7 +323,6 @@ static struct i2c_driver sc18is602_driver = {
.name = "sc18is602",
},
.probe = sc18is602_probe,
- .remove = sc18is602_remove,
.id_table = sc18is602_id,
};
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 796c077ef43..c8e795ef2e1 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -46,8 +46,6 @@
/* SPSR */
#define RXFL (1 << 2)
-#define hspi2info(h) (h->dev->platform_data)
-
struct hspi_priv {
void __iomem *addr;
struct spi_master *master;
@@ -68,6 +66,16 @@ static u32 hspi_read(struct hspi_priv *hspi, int reg)
return ioread32(hspi->addr + reg);
}
+static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
+{
+ u32 val = hspi_read(hspi, reg);
+
+ val &= ~mask;
+ val |= set & mask;
+
+ hspi_write(hspi, reg, val);
+}
+
/*
* transfer function
*/
@@ -79,7 +87,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
if ((mask & hspi_read(hspi, SPSR)) == val)
return 0;
- msleep(20);
+ udelay(10);
}
dev_err(hspi->dev, "timeout\n");
@@ -89,20 +97,12 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
/*
* spi master function
*/
-static int hspi_prepare_transfer(struct spi_master *master)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(hspi->dev);
- return 0;
-}
-static int hspi_unprepare_transfer(struct spi_master *master)
+#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
+#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
+static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
-
- pm_runtime_put_sync(hspi->dev);
- return 0;
+ hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
}
static void hspi_hw_setup(struct hspi_priv *hspi,
@@ -111,14 +111,9 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
{
struct spi_device *spi = msg->spi;
struct device *dev = hspi->dev;
- u32 target_rate;
u32 spcr, idiv_clk;
u32 rate, best_rate, min, tmp;
- target_rate = t ? t->speed_hz : 0;
- if (!target_rate)
- target_rate = spi->max_speed_hz;
-
/*
* find best IDIV/CLKCx settings
*/
@@ -135,10 +130,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= 16;
/* CLKCx calculation */
- rate /= (((idiv_clk & 0x1F) + 1) * 2) ;
+ rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
- tmp = abs(target_rate - rate);
+ tmp = abs(t->speed_hz - rate);
if (tmp < min) {
min = tmp;
spcr = idiv_clk;
@@ -151,11 +146,11 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
if (spi->mode & SPI_CPOL)
spcr |= 1 << 6;
- dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
- hspi_write(hspi, SPSCR, 0x1); /* master mode */
+ hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
static int hspi_transfer_one_message(struct spi_master *master,
@@ -166,12 +161,21 @@ static int hspi_transfer_one_message(struct spi_master *master,
u32 tx;
u32 rx;
int ret, i;
+ unsigned int cs_change;
+ const int nsecs = 50;
dev_dbg(hspi->dev, "%s\n", __func__);
+ cs_change = 1;
ret = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
- hspi_hw_setup(hspi, msg, t);
+
+ if (cs_change) {
+ hspi_hw_setup(hspi, msg, t);
+ hspi_hw_cs_enable(hspi);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
for (i = 0; i < t->len; i++) {
@@ -186,7 +190,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
hspi_write(hspi, SPTBR, tx);
- /* wait recive */
+ /* wait receive */
ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
if (ret < 0)
break;
@@ -198,38 +202,28 @@ static int hspi_transfer_one_message(struct spi_master *master,
}
msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ ndelay(nsecs);
+ }
}
msg->status = ret;
+ if (!cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ }
spi_finalize_current_message(master);
return ret;
}
-static int hspi_setup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- if (8 != spi->bits_per_word) {
- dev_err(dev, "bits_per_word should be 8\n");
- return -EIO;
- }
-
- dev_dbg(dev, "%s setup\n", spi->modalias);
-
- return 0;
-}
-
-static void hspi_cleanup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- dev_dbg(dev, "%s cleanup\n", spi->modalias);
-}
-
-static int __devinit hspi_probe(struct platform_device *pdev)
+static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -250,15 +244,15 @@ static int __devinit hspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
- clk = clk_get(NULL, "shyway_clk");
- if (!clk) {
- dev_err(&pdev->dev, "shyway_clk is required\n");
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "couldn't get clock\n");
ret = -EINVAL;
goto error0;
}
hspi = spi_master_get_devdata(master);
- dev_set_drvdata(&pdev->dev, hspi);
+ platform_set_drvdata(pdev, hspi);
/* init hspi */
hspi->master = master;
@@ -272,26 +266,25 @@ static int __devinit hspi_probe(struct platform_device *pdev)
goto error1;
}
- master->num_chipselect = 1;
+ pm_runtime_enable(&pdev->dev);
+
master->bus_num = pdev->id;
- master->setup = hspi_setup;
- master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->prepare_transfer_hardware = hspi_prepare_transfer;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
- master->unprepare_transfer_hardware = hspi_unprepare_transfer;
- ret = spi_register_master(master);
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error1;
+ goto error2;
}
- pm_runtime_enable(&pdev->dev);
-
- dev_info(&pdev->dev, "probed\n");
-
return 0;
+ error2:
+ pm_runtime_disable(&pdev->dev);
error1:
clk_put(clk);
error0:
@@ -300,24 +293,30 @@ static int __devinit hspi_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit hspi_remove(struct platform_device *pdev)
+static int hspi_remove(struct platform_device *pdev)
{
- struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
+ struct hspi_priv *hspi = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
clk_put(hspi->clk);
- spi_unregister_master(hspi->master);
return 0;
}
+static struct of_device_id hspi_of_match[] = {
+ { .compatible = "renesas,hspi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hspi_of_match);
+
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
- .remove = __devexit_p(hspi_remove),
+ .remove = hspi_remove,
.driver = {
.name = "sh-hspi",
.owner = THIS_MODULE,
+ .of_match_table = hspi_of_match,
},
};
module_platform_driver(hspi_driver);
@@ -325,4 +324,4 @@ module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_ALIAS("platform:sh_spi");
+MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1f466bc66d9..45b09142afe 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -15,58 +15,108 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <asm/unaligned.h>
+
+struct sh_msiof_chipdata {
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 master_flags;
+};
+
struct sh_msiof_spi_priv {
- struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
+ const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
- unsigned long flags;
int tx_fifo_size;
int rx_fifo_size;
};
-#define TMDR1 0x00
-#define TMDR2 0x04
-#define TMDR3 0x08
-#define RMDR1 0x10
-#define RMDR2 0x14
-#define RMDR3 0x18
-#define TSCR 0x20
-#define RSCR 0x22
-#define CTR 0x28
-#define FCTR 0x30
-#define STR 0x40
-#define IER 0x44
-#define TDR1 0x48
-#define TDR2 0x4c
-#define TFDR 0x50
-#define RDR1 0x58
-#define RDR2 0x5c
-#define RFDR 0x60
-
-#define CTR_TSCKE (1 << 15)
-#define CTR_TFSE (1 << 14)
-#define CTR_TXE (1 << 9)
-#define CTR_RXE (1 << 8)
-
-#define STR_TEOF (1 << 23)
-#define STR_REOF (1 << 7)
+#define TMDR1 0x00 /* Transmit Mode Register 1 */
+#define TMDR2 0x04 /* Transmit Mode Register 2 */
+#define TMDR3 0x08 /* Transmit Mode Register 3 */
+#define RMDR1 0x10 /* Receive Mode Register 1 */
+#define RMDR2 0x14 /* Receive Mode Register 2 */
+#define RMDR3 0x18 /* Receive Mode Register 3 */
+#define TSCR 0x20 /* Transmit Clock Select Register */
+#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define CTR 0x28 /* Control Register */
+#define FCTR 0x30 /* FIFO Control Register */
+#define STR 0x40 /* Status Register */
+#define IER 0x44 /* Interrupt Enable Register */
+#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define TFDR 0x50 /* Transmit FIFO Data Register */
+#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define RFDR 0x60 /* Receive FIFO Data Register */
+
+/* TMDR1 and RMDR1 */
+#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
+#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
+#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+/* TMDR1 */
+#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+
+/* TMDR2 and RMDR2 */
+#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+
+/* TSCR and RSCR */
+#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS(i) (((i) - 1) << 8)
+#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0x0000
+#define SCR_BRDV_DIV_4 0x0001
+#define SCR_BRDV_DIV_8 0x0002
+#define SCR_BRDV_DIV_16 0x0003
+#define SCR_BRDV_DIV_32 0x0004
+#define SCR_BRDV_DIV_1 0x0007
+
+/* CTR */
+#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
+#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
+#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
+#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE 0x00000200 /* Transmit Enable */
+#define CTR_RXE 0x00000100 /* Receive Enable */
+
+/* STR and IER */
+#define STR_TEOF 0x00800000 /* Frame Transmission End */
+#define STR_REOF 0x00000080 /* Frame Reception End */
+
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
@@ -130,28 +180,27 @@ static struct {
unsigned short div;
unsigned short scr;
} const sh_msiof_spi_clk_table[] = {
- { 1, 0x0007 },
- { 2, 0x0000 },
- { 4, 0x0001 },
- { 8, 0x0002 },
- { 16, 0x0003 },
- { 32, 0x0004 },
- { 64, 0x1f00 },
- { 128, 0x1f01 },
- { 256, 0x1f02 },
- { 512, 0x1f03 },
- { 1024, 0x1f04 },
+ { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
+ { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
+ { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
+ { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
+ { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
+ { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
+ { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
+ { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
+ { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
+ { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
+ { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
- unsigned long parent_rate,
- unsigned long spi_hz)
+ unsigned long parent_rate, u32 spi_hz)
{
unsigned long div = 1024;
size_t k;
if (!WARN_ON(!spi_hz || !parent_rate))
- div = parent_rate / spi_hz;
+ div = DIV_ROUND_UP(parent_rate, spi_hz);
/* TODO: make more fine grained */
@@ -163,12 +212,13 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
- sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+ if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
u32 cpol, u32 cpha,
- u32 tx_hi_z, u32 lsb_first)
+ u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
u32 tmp;
int edge;
@@ -181,18 +231,26 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
* 1 1 11 11 1 1
*/
sh_msiof_write(p, FCTR, 0);
- sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
- sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
- tmp = 0xa0000000;
- tmp |= cpol << 30; /* TSCKIZ */
- tmp |= cpol << 28; /* RSCKIZ */
+ tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
+ tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, RMDR1, tmp);
+
+ tmp = 0;
+ tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
+ tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << 27; /* TEDG */
- tmp |= edge << 26; /* REDG */
- tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ tmp |= edge << CTR_TEDG_SHIFT;
+ tmp |= edge << CTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
sh_msiof_write(p, CTR, tmp);
}
@@ -200,12 +258,12 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf)
+ if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | 1);
+ sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
if (rx_buf)
sh_msiof_write(p, RMDR2, dr2);
@@ -358,76 +416,45 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}
-static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_spi_setup(struct spi_device *spi)
{
- int bits;
-
- bits = t ? t->bits_per_word : 0;
- if (!bits)
- bits = spi->bits_per_word;
- return bits;
-}
-
-static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
- struct spi_transfer *t)
-{
- unsigned long hz;
-
- hz = t ? t->speed_hz : 0;
- if (!hz)
- hz = spi->max_speed_hz;
- return hz;
-}
+ struct device_node *np = spi->master->dev.of_node;
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- int bits;
+ if (!np) {
+ /*
+ * Use spi->controller_data for CS (same strategy as spi_gpio),
+ * if any; otherwise let the hardware control CS.
+ */
+ spi->cs_gpio = (uintptr_t)spi->controller_data;
+ }
- /* noting to check hz values against since parent clock is disabled */
+ /* Configure pins before deasserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
- bits = sh_msiof_spi_bits(spi, t);
- if (bits < 8)
- return -EINVAL;
- if (bits > 32)
- return -EINVAL;
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return spi_bitbang_setup_transfer(spi, t);
+ return 0;
}
-static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+static int sh_msiof_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
- int value;
-
- /* chip select is active low unless SPI_CS_HIGH is set */
- if (spi->mode & SPI_CS_HIGH)
- value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
- else
- value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
-
- if (is_on == BITBANG_CS_ACTIVE) {
- if (!test_and_set_bit(0, &p->flags)) {
- pm_runtime_get_sync(&p->pdev->dev);
- clk_enable(p->clk);
- }
-
- /* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST));
- }
-
- /* use spi->controller data for CS (same strategy as spi_gpio) */
- gpio_set_value((unsigned)spi->controller_data, value);
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ const struct spi_device *spi = msg->spi;
- if (is_on == BITBANG_CS_INACTIVE) {
- if (test_and_clear_bit(0, &p->flags)) {
- clk_disable(p->clk);
- pm_runtime_put(&p->pdev->dev);
- }
- }
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+ return 0;
}
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
@@ -464,7 +491,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
/* start by setting frame bit */
- INIT_COMPLETION(p->done);
+ reinit_completion(&p->done);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
if (ret) {
dev_err(&p->pdev->dev, "failed to start hardware\n");
@@ -481,7 +508,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
/* clear status bits */
sh_msiof_reset_str(p);
- /* shut down frame, tx/tx and clock signals */
+ /* shut down frame, rx/tx and clock signals */
ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
if (rx_buf)
@@ -499,9 +526,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
return ret;
}
-static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
int bits;
@@ -511,7 +540,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int n;
bool swab;
- bits = sh_msiof_spi_bits(spi, t);
+ bits = t->bits_per_word;
if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
bits = 32;
@@ -561,8 +590,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
/* setup clocks (clock already enabled in chipselect()) */
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
- sh_msiof_spi_hz(spi, t));
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
/* transfer in fifo sized chunks */
words = t->len / bytes_per_word;
@@ -582,153 +610,184 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
words -= n;
}
- return bytes_done;
+ return 0;
}
-static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits)
+static const struct sh_msiof_chipdata sh_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .master_flags = 0,
+};
+
+static const struct sh_msiof_chipdata r8a779x_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .master_flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", .data = &sh_data },
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
+#ifdef CONFIG_OF
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
- BUG(); /* unused but needed by bitbang code */
- return 0;
+ struct sh_msiof_spi_info *info;
+ struct device_node *np = dev->of_node;
+ u32 num_cs = 1;
+
+ info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ /* Parse the MSIOF properties */
+ of_property_read_u32(np, "num-cs", &num_cs);
+ of_property_read_u32(np, "renesas,tx-fifo-size",
+ &info->tx_fifo_override);
+ of_property_read_u32(np, "renesas,rx-fifo-size",
+ &info->rx_fifo_override);
+
+ info->num_chipselect = num_cs;
+
+ return info;
}
+#else
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
+ const struct of_device_id *of_id;
struct sh_msiof_spi_priv *p;
- char clk_name[16];
int i;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
if (master == NULL) {
dev_err(&pdev->dev, "failed to allocate spi master\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
- p->info = pdev->dev.platform_data;
+
+ of_id = of_match_device(sh_msiof_match, &pdev->dev);
+ if (of_id) {
+ p->chipdata = of_id->data;
+ p->info = sh_msiof_spi_parse_dt(&pdev->dev);
+ } else {
+ p->chipdata = (const void *)pdev->id_entry->driver_data;
+ p->info = dev_get_platdata(&pdev->dev);
+ }
+
+ if (!p->info) {
+ dev_err(&pdev->dev, "failed to obtain device info\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
init_completion(&p->done);
- snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
- p->clk = clk_get(&pdev->dev, clk_name);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i = platform_get_irq(pdev, 0);
- if (!r || i < 0) {
- dev_err(&pdev->dev, "cannot get platform resources\n");
+ if (i < 0) {
+ dev_err(&pdev->dev, "cannot get platform IRQ\n");
ret = -ENOENT;
- goto err2;
+ goto err1;
}
- p->mapbase = ioremap_nocache(r->start, resource_size(r));
- if (!p->mapbase) {
- dev_err(&pdev->dev, "unable to ioremap\n");
- ret = -ENXIO;
- goto err2;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p->mapbase)) {
+ ret = PTR_ERR(p->mapbase);
+ goto err1;
}
- ret = request_irq(i, sh_msiof_spi_irq, 0,
- dev_name(&pdev->dev), p);
+ ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
+ dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "unable to request irq\n");
- goto err3;
+ goto err1;
}
p->pdev = pdev;
pm_runtime_enable(&pdev->dev);
- /* The standard version of MSIOF use 64 word FIFOs */
- p->tx_fifo_size = 64;
- p->rx_fifo_size = 64;
-
/* Platform data may override FIFO sizes */
+ p->tx_fifo_size = p->chipdata->tx_fifo_size;
+ p->rx_fifo_size = p->chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* init master and bitbang code */
+ /* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = 0;
+ master->flags = p->chipdata->master_flags;
master->bus_num = pdev->id;
+ master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = p->info->num_chipselect;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
-
- p->bitbang.master = master;
- p->bitbang.chipselect = sh_msiof_spi_chipselect;
- p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
- p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
- p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
-
- ret = spi_bitbang_start(&p->bitbang);
- if (ret == 0)
- return 0;
+ master->setup = sh_msiof_spi_setup;
+ master->prepare_message = sh_msiof_prepare_message;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->auto_runtime_pm = true;
+ master->transfer_one = sh_msiof_transfer_one;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto err2;
+ }
+
+ return 0;
- pm_runtime_disable(&pdev->dev);
- err3:
- iounmap(p->mapbase);
err2:
- clk_put(p->clk);
+ pm_runtime_disable(&pdev->dev);
err1:
spi_master_put(master);
- err0:
return ret;
}
static int sh_msiof_spi_remove(struct platform_device *pdev)
{
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
- int ret;
-
- ret = spi_bitbang_stop(&p->bitbang);
- if (!ret) {
- pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), p);
- iounmap(p->mapbase);
- clk_put(p->clk);
- spi_master_put(p->bitbang.master);
- }
- return ret;
-}
-
-static int sh_msiof_spi_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ pm_runtime_disable(&pdev->dev);
return 0;
}
-static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
- .runtime_suspend = sh_msiof_spi_runtime_nop,
- .runtime_resume = sh_msiof_spi_runtime_nop,
+static struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
+ {},
};
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
- .pm = &sh_msiof_spi_dev_pm_ops,
+ .of_match_table = of_match_ptr(sh_msiof_match),
},
};
module_platform_driver(sh_msiof_spi_drv);
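The BRPS/BRDV table above enumerates prescaler and divider pairs for total division ratios of 1 through 1024; sh_msiof_spi_set_clk_regs() rounds the required ratio up with DIV_ROUND_UP() and clamps the index to the last entry. The selection loop itself sits outside this hunk, so the following is an assumed reconstruction, a sketch rather than the driver's code:

#include <stddef.h>

/* Total division ratios the table's BRPS/BRDV pairs can produce. */
static const unsigned long sketch_divs[] = {
	1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024
};

static size_t sketch_pick_clk(unsigned long parent_rate, unsigned long spi_hz)
{
	unsigned long div = 1024;
	size_t k;
	const size_t n = sizeof(sketch_divs) / sizeof(sketch_divs[0]);

	if (spi_hz && parent_rate)
		div = (parent_rate + spi_hz - 1) / spi_hz;	/* DIV_ROUND_UP */

	for (k = 0; k < n; k++)
		if (sketch_divs[k] >= div)
			break;

	/* Clamp to the slowest setting, as the driver's min_t() does. */
	return (k < n) ? k : n - 1;
}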
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 097e506042b..b83dd733684 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -14,10 +14,8 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -109,7 +107,7 @@ static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
- if (sp->info && sp->info->chip_select)
+ if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, dev->chip_select, value);
}
@@ -130,10 +128,15 @@ static int sh_sci_spi_probe(struct platform_device *dev)
sp = spi_master_get_devdata(master);
platform_set_drvdata(dev, sp);
- sp->info = dev->dev.platform_data;
+ sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
/* setup spi bitbang adaptor */
- sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master = master;
sp->bitbang.master->bus_num = sp->info->bus_num;
sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
@@ -172,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
{
struct sh_sci_spi *sp = platform_get_drvdata(dev);
- iounmap(sp->membase);
- setbits(sp, PIN_INIT, 0);
spi_bitbang_stop(&sp->bitbang);
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
spi_master_put(sp->bitbang.master);
return 0;
}
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 79442c31bcd..03edf5ed0e9 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -171,7 +171,6 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len)
@@ -213,9 +212,7 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
@@ -239,7 +236,6 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len > SPI_SH_MAX_BYTE)
@@ -247,9 +243,7 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);
@@ -328,7 +322,8 @@ static void spi_sh_work(struct work_struct *work)
spin_lock_irqsave(&ss->lock, flags);
mesg->status = 0;
- mesg->complete(mesg->context);
+ if (mesg->complete)
+ mesg->complete(mesg->context);
}
clear_fifo(ss);
@@ -346,7 +341,8 @@ static void spi_sh_work(struct work_struct *work)
error:
mesg->status = ret;
- mesg->complete(mesg->context);
+ if (mesg->complete)
+ mesg->complete(mesg->context);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
@@ -358,9 +354,6 @@ static int spi_sh_setup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
pr_debug("%s: enter\n", __func__);
spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */
@@ -432,9 +425,9 @@ static irqreturn_t spi_sh_irq(int irq, void *_ss)
return IRQ_HANDLED;
}
-static int __devexit spi_sh_remove(struct platform_device *pdev)
+static int spi_sh_remove(struct platform_device *pdev)
{
- struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
+ struct spi_sh_data *ss = platform_get_drvdata(pdev);
spi_unregister_master(ss->master);
destroy_workqueue(ss->workqueue);
@@ -444,7 +437,7 @@ static int __devexit spi_sh_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit spi_sh_probe(struct platform_device *pdev)
+static int spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -471,7 +464,7 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
}
ss = spi_master_get_devdata(master);
- dev_set_drvdata(&pdev->dev, ss);
+ platform_set_drvdata(pdev, ss);
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_8BIT:
@@ -539,7 +532,7 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
static struct platform_driver spi_sh_driver = {
.probe = spi_sh_probe,
- .remove = __devexit_p(spi_sh_remove),
+ .remove = spi_sh_remove,
.driver = {
.name = "sh_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index ecc3d9763d1..95ac276eaaf 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -19,7 +20,9 @@
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/pinctrl/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -83,6 +86,7 @@
#define SIRFSOC_SPI_TX_DONE BIT(1)
#define SIRFSOC_SPI_RX_OFLOW BIT(2)
#define SIRFSOC_SPI_TX_UFLOW BIT(3)
+#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
@@ -120,14 +124,25 @@
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+/*
+ * Use DMA only if the rx/tx buffers and the transfer size are 4-byte
+ * aligned, due to a limitation of the DMA controller.
+ */
+
+#define ALIGNED(x) (!((u32)x & 0x3))
+#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
+ ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
+
+#define SIRFSOC_MAX_CMD_BYTES 4
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
- struct completion done;
+ struct completion rx_done;
+ struct completion tx_done;
void __iomem *base;
u32 ctrl_freq; /* SPI controller clock speed */
struct clk *clk;
- struct pinctrl *p;
/* rx & tx bufs from the spi_transfer */
const void *tx;
@@ -139,11 +154,22 @@ struct sirfsoc_spi {
void (*tx_word) (struct sirfsoc_spi *);
 	/* number of words left to be transmitted/received */
- unsigned int left_tx_cnt;
- unsigned int left_rx_cnt;
+ unsigned int left_tx_word;
+ unsigned int left_rx_word;
- /* tasklet to push tx msg into FIFO */
- struct tasklet_struct tasklet_tx;
+ /* rx & tx DMA channels */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ void *dummypage;
+ int word_width; /* in bytes */
+
+ /*
+	 * if the tx size is no more than 4 bytes and there is no rx
+	 * buffer, use the command mode
+ */
+ bool tx_by_cmd;
int chipselect[0];
};
@@ -160,7 +186,7 @@ static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
@@ -174,7 +200,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
@@ -189,7 +215,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
@@ -203,7 +229,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
@@ -218,7 +244,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
@@ -233,113 +259,225 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
-}
-
-static void spi_sirfsoc_tasklet_tx(unsigned long arg)
-{
- struct sirfsoc_spi *sspi = (struct sirfsoc_spi *)arg;
-
- /* Fill Tx FIFO while there are left words to be transmitted */
- while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) &
- SIRFSOC_SPI_FIFO_FULL)) &&
- sspi->left_tx_cnt)
- sspi->tx_word(sspi);
+ sspi->left_tx_word--;
}
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
struct sirfsoc_spi *sspi = dev_id;
u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
-
- writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
+ complete(&sspi->tx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ return IRQ_HANDLED;
+ }
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
- complete(&sspi->done);
+ complete(&sspi->tx_done);
+ complete(&sspi->rx_done);
writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ return IRQ_HANDLED;
}
+ if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
+ complete(&sspi->tx_done);
+ while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
+ SIRFSOC_SPI_RX_IO_DMA))
+ cpu_relax();
+ complete(&sspi->rx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
- if (spi_stat & SIRFSOC_SPI_FRM_END) {
- while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
- & SIRFSOC_SPI_FIFO_EMPTY)) &&
- sspi->left_rx_cnt)
- sspi->rx_word(sspi);
-
- /* Received all words */
- if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
- complete(&sspi->done);
- writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
- }
- }
+ return IRQ_HANDLED;
+}
- if (spi_stat & SIRFSOC_SPI_RXFIFO_THD_REACH ||
- spi_stat & SIRFSOC_SPI_TXFIFO_THD_REACH ||
- spi_stat & SIRFSOC_SPI_RX_FIFO_FULL ||
- spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
- tasklet_schedule(&sspi->tasklet_tx);
+static void spi_sirfsoc_dma_fini_callback(void *data)
+{
+ struct completion *dma_complete = data;
- return IRQ_HANDLED;
+ complete(dma_complete);
}
-static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
int timeout = t->len * 10;
+ u32 cmd;
+
sspi = spi_master_get_devdata(spi->master);
+ memcpy(&cmd, sspi->tx, t->len);
+ if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
+ cmd = cpu_to_be32(cmd) >>
+ ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
+ if (sspi->word_width == 2 && t->len == 4 &&
+ (!(spi->mode & SPI_LSB_FIRST)))
+ cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
+ writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
+ writel(SIRFSOC_SPI_FRM_END_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_CMD_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "cmd transfer timeout\n");
+ return 0;
+ }
- sspi->tx = t->tx_buf;
- sspi->rx = t->rx_buf;
- sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
- INIT_COMPLETION(sspi->done);
+ return t->len;
+}
- writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+ int timeout = t->len * 10;
- if (t->len == 1) {
- writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
- SIRFSOC_SPI_ENA_AUTO_CLR,
- sspi->base + SIRFSOC_SPI_CTRL);
- writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
- } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
+ sspi = spi_master_get_devdata(spi->master);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
- SIRFSOC_SPI_MUL_DAT_MODE |
- SIRFSOC_SPI_ENA_AUTO_CLR,
+ SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
sspi->base + SIRFSOC_SPI_CTRL);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
} else {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
sspi->base + SIRFSOC_SPI_CTRL);
writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
}
-
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
-
- /* Send the first word to trigger the whole tx/rx process */
- sspi->tx_word(sspi);
-
- writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
- SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
- SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
- SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
-
- if (wait_for_completion_timeout(&sspi->done, timeout) == 0)
+ sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
+ (t->tx_buf != t->rx_buf) ?
+ DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
+ rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
+ sspi->dst_start, t->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ rx_desc->callback_param = &sspi->rx_done;
+
+ sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
+ (t->tx_buf != t->rx_buf) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+ tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
+ sspi->src_start, t->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ tx_desc->callback_param = &sspi->tx_done;
+
+ dmaengine_submit(tx_desc);
+ dmaengine_submit(rx_desc);
+ dma_async_issue_pending(sspi->tx_chan);
+ dma_async_issue_pending(sspi->rx_chan);
+ writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
dev_err(&spi->dev, "transfer timeout\n");
-
+ dmaengine_terminate_all(sspi->rx_chan);
+ } else
+ sspi->left_rx_word = 0;
+ /*
+	 * we only wait for the tx-done event when transferring by DMA.
+	 * for PIO, rx data is produced by writing tx data, so once rx is
+	 * done, tx finished earlier
+ */
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->tx_chan);
+ }
+ dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
+ dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
/* TX, RX FIFO stop */
writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
- writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+ if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
+ writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
+}
+
+static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ int timeout = t->len * 10;
+
+ sspi = spi_master_get_devdata(spi->master);
+ do {
+ writel(SIRFSOC_SPI_FIFO_RESET,
+ sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET,
+ sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
+ SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
+ - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
+ - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
+ & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
+ sspi->tx_word(sspi);
+ writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
+ SIRFSOC_SPI_TX_UFLOW_INT_EN |
+ SIRFSOC_SPI_RX_OFLOW_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
+ !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ break;
+ }
+ while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
+ & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
+ sspi->rx_word(sspi);
+ writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
+}
- return t->len - sspi->left_rx_cnt;
+static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ sspi = spi_master_get_devdata(spi->master);
+
+ sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
+ sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
+ sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
+ reinit_completion(&sspi->rx_done);
+ reinit_completion(&sspi->tx_done);
+ /*
+	 * if the data is sent via the command register (rx_buf is NULL),
+	 * just write the command data into the command register and wait
+	 * for its completion.
+ */
+ if (sspi->tx_by_cmd)
+ spi_sirfsoc_cmd_transfer(spi, t);
+ else if (IS_DMA_VALID(t))
+ spi_sirfsoc_dma_transfer(spi, t);
+ else
+ spi_sirfsoc_pio_transfer(spi, t);
+
+ return t->len - sspi->left_rx_word * sspi->word_width;
}
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
@@ -348,7 +486,6 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
if (sspi->chipselect[spi->chip_select] == 0) {
u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
- regval |= SIRFSOC_SPI_CS_IO_OUT;
switch (value) {
case BITBANG_CS_ACTIVE:
if (spi->mode & SPI_CS_HIGH)
@@ -366,7 +503,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
} else {
int gpio = sspi->chipselect[spi->chip_select];
- gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ gpio_direction_output(gpio,
+ spi->mode & SPI_CS_HIGH ? 1 : 0);
+ break;
+ case BITBANG_CS_INACTIVE:
+ gpio_direction_output(gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ break;
+ }
}
}
@@ -382,15 +528,10 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi = spi_master_get_devdata(spi->master);
- bits_per_word = t && t->bits_per_word ? t->bits_per_word :
- spi->bits_per_word;
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- /* Enable IO mode for RX, TX */
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
regval = (sspi->ctrl_freq / (2 * hz)) - 1;
-
if (regval > 0xFFFF || regval < 0) {
dev_err(&spi->dev, "Speed %d not supported\n", hz);
return -EINVAL;
@@ -401,37 +542,30 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
sspi->rx_word = spi_sirfsoc_rx_word_u8;
sspi->tx_word = spi_sirfsoc_tx_word_u8;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
break;
case 12:
case 16:
- regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
+ regval |= (bits_per_word == 12) ?
+ SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
sspi->rx_word = spi_sirfsoc_rx_word_u16;
sspi->tx_word = spi_sirfsoc_tx_word_u16;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
break;
case 32:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
sspi->rx_word = spi_sirfsoc_rx_word_u32;
sspi->tx_word = spi_sirfsoc_tx_word_u32;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
break;
default:
- dev_err(&spi->dev, "Bits per word %d not supported\n",
- bits_per_word);
- return -EINVAL;
+ BUG();
}
+ sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+
if (!(spi->mode & SPI_CS_HIGH))
regval |= SIRFSOC_SPI_CS_IDLE_STAT;
if (!(spi->mode & SPI_LSB_FIRST))
@@ -440,8 +574,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
/*
- * Data should be driven at least 1/2 cycle before the fetch edge to make
- * sure that data gets stable at the fetch edge.
+ * Data should be driven at least 1/2 cycle before the fetch edge
+ * to make sure that data gets stable at the fetch edge.
*/
if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
(!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
@@ -460,26 +594,46 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
+ if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
+ regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
+ SIRFSOC_SPI_CMD_MODE);
+ sspi->tx_by_cmd = true;
+ } else {
+ regval &= ~SIRFSOC_SPI_CMD_MODE;
+ sspi->tx_by_cmd = false;
+ }
+ /*
+	 * put the SPI controller into RISC chip-select mode; CS is driven by
+	 * software via BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
+ */
+ regval |= SIRFSOC_SPI_CS_IO_MODE;
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+
+ if (IS_DMA_VALID(t)) {
+ /* Enable DMA mode for RX, TX */
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_RX_DMA_FLUSH,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ } else {
+ /* Enable IO mode for RX, TX */
+ writel(SIRFSOC_SPI_IO_MODE_SEL,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_IO_MODE_SEL,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ }
+
return 0;
}
static int spi_sirfsoc_setup(struct spi_device *spi)
{
- struct sirfsoc_spi *sspi;
-
if (!spi->max_speed_hz)
return -EINVAL;
- sspi = spi_master_get_devdata(spi->master);
-
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
return spi_sirfsoc_setup_transfer(spi, NULL);
}
-static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
+static int spi_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_spi *sspi;
struct spi_master *master;
@@ -495,7 +649,8 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
goto err_cs;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(*sspi) + sizeof(int) * num_cs);
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
return -ENOMEM;
@@ -503,12 +658,6 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "Unable to get IO resource\n");
- ret = -ENODEV;
- goto free_master;
- }
master->num_chipselect = num_cs;
for (i = 0; i < master->num_chipselect; i++) {
@@ -535,10 +684,10 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
}
}
- sspi->base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (!sspi->base) {
- dev_err(&pdev->dev, "IO remap failed!\n");
- ret = -ENOMEM;
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sspi->base)) {
+ ret = PTR_ERR(sspi->base);
goto free_master;
}
@@ -552,31 +701,41 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
if (ret)
goto free_master;
- sspi->bitbang.master = spi_master_get(master);
+ sspi->bitbang.master = master;
sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
sspi->bitbang.master->setup = spi_sirfsoc_setup;
master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
- sspi->p = pinctrl_get_select_default(&pdev->dev);
- ret = IS_ERR(sspi->p);
- if (ret)
+ /* request DMA channels */
+ sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
+ if (!sspi->rx_chan) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
goto free_master;
+ }
+ sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
+ if (!sspi->tx_chan) {
+ dev_err(&pdev->dev, "can not allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto free_rx_dma;
+ }
sspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sspi->clk)) {
- ret = -EINVAL;
- goto free_pin;
+ ret = PTR_ERR(sspi->clk);
+ goto free_tx_dma;
}
- clk_enable(sspi->clk);
+ clk_prepare_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
- init_completion(&sspi->done);
-
- tasklet_init(&sspi->tasklet_tx, spi_sirfsoc_tasklet_tx,
- (unsigned long)sspi);
+ init_completion(&sspi->rx_done);
+ init_completion(&sspi->tx_done);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
@@ -585,26 +744,35 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
/* We are not using dummy delay between command and data */
writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
+ sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+ if (!sspi->dummypage) {
+ ret = -ENOMEM;
+ goto free_clk;
+ }
+
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
- goto free_clk;
+ goto free_dummypage;
dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
return 0;
-
+free_dummypage:
+ kfree(sspi->dummypage);
free_clk:
- clk_disable(sspi->clk);
+ clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
-free_pin:
- pinctrl_put(sspi->p);
+free_tx_dma:
+ dma_release_channel(sspi->tx_chan);
+free_rx_dma:
+ dma_release_channel(sspi->rx_chan);
free_master:
spi_master_put(master);
err_cs:
return ret;
}
-static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
+static int spi_sirfsoc_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct sirfsoc_spi *sspi;
@@ -618,19 +786,25 @@ static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
if (sspi->chipselect[i] > 0)
gpio_free(sspi->chipselect[i]);
}
- clk_disable(sspi->clk);
+ kfree(sspi->dummypage);
+ clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
- pinctrl_put(sspi->p);
+ dma_release_channel(sspi->rx_chan);
+ dma_release_channel(sspi->tx_chan);
spi_master_put(master);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(sspi->clk);
return 0;
@@ -638,8 +812,7 @@ static int spi_sirfsoc_suspend(struct device *dev)
static int spi_sirfsoc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
clk_enable(sspi->clk);
@@ -648,36 +821,32 @@ static int spi_sirfsoc_resume(struct device *dev)
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- return 0;
+ return spi_master_resume(master);
}
-
-static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
- .suspend = spi_sirfsoc_suspend,
- .resume = spi_sirfsoc_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
+ spi_sirfsoc_resume);
+
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
+ { .compatible = "sirf,marco-spi", },
{}
};
-MODULE_DEVICE_TABLE(of, sirfsoc_spi_of_match);
+MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
static struct platform_driver spi_sirfsoc_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &spi_sirfsoc_pm_ops,
-#endif
.of_match_table = spi_sirfsoc_of_match,
},
.probe = spi_sirfsoc_probe,
- .remove = __devexit_p(spi_sirfsoc_remove),
+ .remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
-
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
- "Barry Song <Baohua.Song@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
+MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c
deleted file mode 100644
index 911e904b3c8..00000000000
--- a/drivers/spi/spi-stmp.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Freescale STMP378X SPI master driver
- *
- * Author: dmitry pervushin <dimka@embeddedalley.com>
- *
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-
-#include <mach/platform.h>
-#include <mach/stmp3xxx.h>
-#include <mach/dma.h>
-#include <mach/regs-ssp.h>
-#include <mach/regs-apbh.h>
-
-
-/* 0 means DMA mode(recommended, default), !0 - PIO mode */
-static int pio;
-static int clock;
-
-/* default timeout for busy waits is 2 seconds */
-#define STMP_SPI_TIMEOUT (2 * HZ)
-
-struct stmp_spi {
- int id;
-
- void * __iomem regs; /* vaddr of the control registers */
-
- int irq, err_irq;
- u32 dma;
- struct stmp3xxx_dma_descriptor d;
-
- u32 speed_khz;
- u32 saved_timings;
- u32 divider;
-
- struct clk *clk;
- struct device *master_dev;
-
- struct work_struct work;
- struct workqueue_struct *workqueue;
-
- /* lock protects queue access */
- spinlock_t lock;
- struct list_head queue;
-
- struct completion done;
-};
-
-#define busy_wait(cond) \
- ({ \
- unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \
- bool succeeded = false; \
- do { \
- if (cond) { \
- succeeded = true; \
- break; \
- } \
- cpu_relax(); \
- } while (time_before(jiffies, end_jiffies)); \
- succeeded; \
- })
-
-/**
- * stmp_spi_init_hw
- * Initialize the SSP port
- */
-static int stmp_spi_init_hw(struct stmp_spi *ss)
-{
- int err = 0;
- void *pins = ss->master_dev->platform_data;
-
- err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
- if (err)
- goto out;
-
- ss->clk = clk_get(NULL, "ssp");
- if (IS_ERR(ss->clk)) {
- err = PTR_ERR(ss->clk);
- goto out_free_pins;
- }
- clk_enable(ss->clk);
-
- stmp3xxx_reset_block(ss->regs, false);
- stmp3xxx_dma_reset_channel(ss->dma);
-
- return 0;
-
-out_free_pins:
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-out:
- return err;
-}
-
-static void stmp_spi_release_hw(struct stmp_spi *ss)
-{
- void *pins = ss->master_dev->platform_data;
-
- if (ss->clk && !IS_ERR(ss->clk)) {
- clk_disable(ss->clk);
- clk_put(ss->clk);
- }
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-}
-
-static int stmp_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- u8 bits_per_word;
- u32 hz;
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- u16 rate;
-
- bits_per_word = spi->bits_per_word;
- if (t && t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
- /*
- * Calculate speed:
- * - by default, use maximum speed from ssp clk
- * - if device overrides it, use it
- * - if transfer specifies other speed, use transfer's one
- */
- hz = 1000 * ss->speed_khz / ss->divider;
- if (spi->max_speed_hz)
- hz = min(hz, spi->max_speed_hz);
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
-
- if (hz == 0) {
- dev_err(&spi->dev, "Cannot continue with zero clock\n");
- return -EINVAL;
- }
-
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n",
- hz, ss->speed_khz, ss->divider,
- ss->speed_khz * 1000 / ss->divider);
-
- if (ss->speed_khz * 1000 / ss->divider < hz) {
- dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
- __func__, hz);
- return -EINVAL;
- }
-
- rate = 1000 * ss->speed_khz/ss->divider/hz;
-
- writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
- BF(rate - 1, SSP_TIMING_CLOCK_RATE),
- HW_SSP_TIMING + ss->regs);
-
- writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
- BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
- ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
- (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
- ss->regs + HW_SSP_CTRL1);
-
- return 0;
-}
-
-static int stmp_spi_setup(struct spi_device *spi)
-{
- /* spi_setup() does basic checks,
- * stmp_spi_setup_transfer() does more later
- */
- if (spi->bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, spi->bits_per_word);
- return -EINVAL;
- }
- return 0;
-}
-
-static inline u32 stmp_spi_cs(unsigned cs)
-{
- return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
- ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
-}
-
-static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
- unsigned char *buf, dma_addr_t dma_buf, int len,
- int first, int last, bool write)
-{
- u32 c0 = 0;
- dma_addr_t spi_buf_dma = dma_buf;
- int status = 0;
- enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
- c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
- c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
- c0 |= BM_SSP_CTRL0_DATA_XFER;
-
- c0 |= stmp_spi_cs(cs);
-
- c0 |= BF(len, SSP_CTRL0_XFER_COUNT);
-
- if (!dma_buf)
- spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);
-
- ss->d.command->cmd =
- BF(len, APBH_CHn_CMD_XFER_COUNT) |
- BF(1, APBH_CHn_CMD_CMDWORDS) |
- BM_APBH_CHn_CMD_WAIT4ENDCMD |
- BM_APBH_CHn_CMD_IRQONCMPLT |
- BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
- BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
- APBH_CHn_CMD_COMMAND);
- ss->d.command->pio_words[0] = c0;
- ss->d.command->buf_ptr = spi_buf_dma;
-
- stmp3xxx_dma_reset_channel(ss->dma);
- stmp3xxx_dma_clear_interrupt(ss->dma);
- stmp3xxx_dma_enable_interrupt(ss->dma);
- init_completion(&ss->done);
- stmp3xxx_dma_go(ss->dma, &ss->d, 1);
- wait_for_completion(&ss->done);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
- status = -ETIMEDOUT;
-
- if (!dma_buf)
- dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
-
- return status;
-}
-
-static inline void stmp_spi_enable(struct stmp_spi *ss)
-{
- stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static inline void stmp_spi_disable(struct stmp_spi *ss)
-{
- stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
- unsigned char *buf, int len,
- bool first, bool last, bool write)
-{
- if (first)
- stmp_spi_enable(ss);
-
- stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);
-
- while (len--) {
- if (last && len <= 0)
- stmp_spi_disable(ss);
-
- stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
- ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);
-
- if (write)
- stmp3xxx_clearl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
- else
- stmp3xxx_setl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
-
- /* Run! */
- stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- if (write)
- writel(*buf, ss->regs + HW_SSP_DATA);
-
- /* Set TRANSFER */
- stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);
-
- if (!write) {
- if (busy_wait((readl(ss->regs + HW_SSP_STATUS) &
- BM_SSP_STATUS_FIFO_EMPTY)))
- break;
- *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
- }
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- /* advance to the next byte */
- buf++;
- }
-
- return len < 0 ? 0 : -ETIMEDOUT;
-}
-
-static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
-{
- bool first, last;
- struct spi_transfer *t, *tmp_t;
- int status = 0;
- int cs;
-
- cs = m->spi->chip_select;
-
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
-
- first = (&t->transfer_list == m->transfers.next);
- last = (&t->transfer_list == m->transfers.prev);
-
- if (first || t->speed_hz || t->bits_per_word)
- stmp_spi_setup_transfer(m->spi, t);
-
- /* reject "not last" transfers which request to change cs */
- if (t->cs_change && !last) {
- dev_err(&m->spi->dev,
- "Message with t->cs_change has been skipped\n");
- continue;
- }
-
- if (t->tx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
- t->len, first, last, true) :
- stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
- t->tx_dma, t->len, first, last, true);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Tx ",
- DUMP_PREFIX_OFFSET,
- t->tx_buf, t->len);
- else
- pr_debug("Tx: %d bytes\n", t->len);
-#endif
- }
- if (t->rx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, t->rx_buf,
- t->len, first, last, false) :
- stmp_spi_txrx_dma(ss, cs, t->rx_buf,
- t->rx_dma, t->len, first, last, false);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Rx ",
- DUMP_PREFIX_OFFSET,
- t->rx_buf, t->len);
- else
- pr_debug("Rx: %d bytes\n", t->len);
-#endif
- }
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (status)
- break;
-
- }
- return status;
-}
-
-/**
- * stmp_spi_handle - handle messages from the queue
- */
-static void stmp_spi_handle(struct work_struct *w)
-{
- struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
- unsigned long flags;
- struct spi_message *m;
-
- spin_lock_irqsave(&ss->lock, flags);
- while (!list_empty(&ss->queue)) {
- m = list_entry(ss->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irqrestore(&ss->lock, flags);
-
- m->status = stmp_spi_handle_message(ss, m);
- m->complete(m->context);
-
- spin_lock_irqsave(&ss->lock, flags);
- }
- spin_unlock_irqrestore(&ss->lock, flags);
-
- return;
-}
-
-/**
- * stmp_spi_transfer - perform message transfer.
- * Called indirectly from spi_async, queues all the messages to
- * spi_handle_message.
- * @spi: spi device
- * @m: message to be queued
- */
-static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->status = -EINPROGRESS;
- spin_lock_irqsave(&ss->lock, flags);
- list_add_tail(&m->queue, &ss->queue);
- queue_work(ss->workqueue, &ss->work);
- spin_unlock_irqrestore(&ss->lock, flags);
- return 0;
-}
-
-static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
-
- stmp3xxx_dma_clear_interrupt(ss->dma);
- complete(&ss->done);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
- u32 c1, st;
-
- c1 = readl(ss->regs + HW_SSP_CTRL1);
- st = readl(ss->regs + HW_SSP_STATUS);
- dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
- __func__, st, c1);
- stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit stmp_spi_probe(struct platform_device *dev)
-{
- int err = 0;
- struct spi_master *master;
- struct stmp_spi *ss;
- struct resource *r;
-
- master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
- if (master == NULL) {
- err = -ENOMEM;
- goto out0;
- }
- master->flags = SPI_MASTER_HALF_DUPLEX;
-
- ss = spi_master_get_devdata(master);
- platform_set_drvdata(dev, master);
-
- /* Get resources(memory, IRQ) associated with the device */
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
- ss->regs = ioremap(r->start, resource_size(r));
- if (!ss->regs) {
- err = -EINVAL;
- goto out_put_master;
- }
-
- ss->master_dev = &dev->dev;
- ss->id = dev->id;
-
- INIT_WORK(&ss->work, stmp_spi_handle);
- INIT_LIST_HEAD(&ss->queue);
- spin_lock_init(&ss->lock);
-
- ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
- if (!ss->workqueue) {
- err = -ENXIO;
- goto out_put_master;
- }
- master->transfer = stmp_spi_transfer;
- master->setup = stmp_spi_setup;
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
-
- ss->irq = platform_get_irq(dev, 0);
- if (ss->irq < 0) {
- err = ss->irq;
- goto out_put_master;
- }
- ss->err_irq = platform_get_irq(dev, 1);
- if (ss->err_irq < 0) {
- err = ss->err_irq;
- goto out_put_master;
- }
-
- r = platform_get_resource(dev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
-
- ss->dma = r->start;
- err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
- if (err)
- goto out_put_master;
-
- err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
- if (err)
- goto out_free_dma;
-
- master->bus_num = dev->id;
- master->num_chipselect = 1;
-
- /* SPI controller initializations */
- err = stmp_spi_init_hw(ss);
- if (err) {
- dev_dbg(&dev->dev, "cannot initialize hardware\n");
- goto out_free_dma_desc;
- }
-
- if (clock) {
- dev_info(&dev->dev, "clock rate forced to %d\n", clock);
- clk_set_rate(ss->clk, clock);
- }
- ss->speed_khz = clk_get_rate(ss->clk);
- ss->divider = 2;
- dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n",
- ss->speed_khz, clk_get_rate(ss->clk), ss->divider);
-
- /* Register for SPI interrupt */
- err = request_irq(ss->irq, stmp_spi_irq, 0,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
- goto out_release_hw;
- }
-
- /* ..and shared interrupt for all SSP controllers */
- err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
- goto out_free_irq;
- }
-
- err = spi_register_master(master);
- if (err) {
- dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
- goto out_free_irq_2;
- }
- dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
- (u32)ss->regs, ss->irq, master->bus_num,
- pio ? "PIO" : "DMA");
- return 0;
-
-out_free_irq_2:
- free_irq(ss->err_irq, ss);
-out_free_irq:
- free_irq(ss->irq, ss);
-out_free_dma_desc:
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
-out_free_dma:
- stmp3xxx_dma_release(ss->dma);
-out_release_hw:
- stmp_spi_release_hw(ss);
-out_put_master:
- if (ss->workqueue)
- destroy_workqueue(ss->workqueue);
- if (ss->regs)
- iounmap(ss->regs);
- platform_set_drvdata(dev, NULL);
- spi_master_put(master);
-out0:
- return err;
-}
-
-static int __devexit stmp_spi_remove(struct platform_device *dev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = spi_master_get(platform_get_drvdata(dev));
- ss = spi_master_get_devdata(master);
-
- spi_unregister_master(master);
-
- free_irq(ss->err_irq, ss);
- free_irq(ss->irq, ss);
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
- stmp3xxx_dma_release(ss->dma);
- stmp_spi_release_hw(ss);
- destroy_workqueue(ss->workqueue);
- iounmap(ss->regs);
- spi_master_put(master);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- ss->saved_timings = readl(HW_SSP_TIMING + ss->regs);
- clk_disable(ss->clk);
-
- return 0;
-}
-
-static int stmp_spi_resume(struct platform_device *pdev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- clk_enable(ss->clk);
- stmp3xxx_reset_block(ss->regs, false);
- writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);
-
- return 0;
-}
-
-#else
-#define stmp_spi_suspend NULL
-#define stmp_spi_resume NULL
-#endif
-
-static struct platform_driver stmp_spi_driver = {
- .probe = stmp_spi_probe,
- .remove = __devexit_p(stmp_spi_remove),
- .driver = {
- .name = "stmp3xxx_ssp",
- .owner = THIS_MODULE,
- },
- .suspend = stmp_spi_suspend,
- .resume = stmp_spi_resume,
-};
-module_platform_driver(stmp_spi_driver);
-
-module_param(pio, int, S_IRUGO);
-module_param(clock, int, S_IRUGO);
-MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
-MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
new file mode 100644
index 00000000000..85204c93f3d
--- /dev/null
+++ b/drivers/spi/spi-sun4i.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN4I_FIFO_DEPTH 64
+
+#define SUN4I_RXDATA_REG 0x00
+
+#define SUN4I_TXDATA_REG 0x04
+
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CTL_CPHA BIT(2)
+#define SUN4I_CTL_CPOL BIT(3)
+#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
+#define SUN4I_CTL_LMTF BIT(6)
+#define SUN4I_CTL_TF_RST BIT(8)
+#define SUN4I_CTL_RF_RST BIT(9)
+#define SUN4I_CTL_XCH BIT(10)
+#define SUN4I_CTL_CS_MASK 0x3000
+#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
+#define SUN4I_CTL_DHB BIT(15)
+#define SUN4I_CTL_CS_MANUAL BIT(16)
+#define SUN4I_CTL_CS_LEVEL BIT(17)
+#define SUN4I_CTL_TP BIT(18)
+
+#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_TC BIT(16)
+
+#define SUN4I_INT_STA_REG 0x10
+
+#define SUN4I_DMA_CTL_REG 0x14
+
+#define SUN4I_WAIT_REG 0x18
+
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_FIFO_STA_REG 0x28
+#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_TF_CNT_BITS 16
+
+struct sun4i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+ reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ reg &= ~SUN4I_CTL_CS_MASK;
+ reg |= SUN4I_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN4I_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN4I_CTL_CS_LEVEL;
+
+ /*
+ * Even though this looks irrelevant since we are supposed to
+ * be controlling the chip select manually, this bit also
+ * controls the levels of the chip select for inactive
+ * devices.
+ *
+ * If we don't set it, the chip select level will go low by
+ * default when the device is idle, which is not really
+ * expected in the common case where the chip select is active
+ * low.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
+ else
+ reg |= SUN4I_CTL_CS_ACTIVE_LOW;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+}
+
+static int sun4i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+	/* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN4I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
+
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ /* Reset FIFOs */
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN4I_CTL_CPOL;
+ else
+ reg &= ~SUN4I_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN4I_CTL_CPHA;
+ else
+ reg &= ~SUN4I_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN4I_CTL_LMTF;
+ else
+ reg &= ~SUN4I_CTL_LMTF;
+
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN4I_CTL_DHB;
+ else
+ reg |= SUN4I_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN4I_CTL_CS_MANUAL;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+	 * We have two choices here. Either we can use the clock
+	 * divide rate 1 (CDR1), which is calculated with this formula:
+	 * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+	 * Or we can use CDR2, which is calculated with the formula:
+	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+	 * Whether we use the former or the latter is selected through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN4I_CLK_CTL_CDR1(div);
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+
+ /* Fill the TX FIFO */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+out:
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
+{
+ struct sun4i_spi *sspi = dev_id;
+ u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN4I_INT_CTL_TC) {
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun4i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun4i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun4i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun4i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
+ 0, "sun4i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun4i_spi_set_cs;
+ master->transfer_one = sun4i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ /*
+	 * This wake-up/shutdown pattern makes sure the device is
+	 * powered up here even when runtime PM is disabled
+ */
+ ret = sun4i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun4i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun4i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_spi_match[] = {
+ { .compatible = "allwinner,sun4i-a10-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_spi_match);
+
+static const struct dev_pm_ops sun4i_spi_pm_ops = {
+ .runtime_resume = sun4i_spi_runtime_resume,
+ .runtime_suspend = sun4i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun4i_spi_driver = {
+ .probe = sun4i_spi_probe,
+ .remove = sun4i_spi_remove,
+ .driver = {
+ .name = "sun4i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun4i_spi_match,
+ .pm = &sun4i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
+MODULE_LICENSE("GPL");
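
The sun4i clock-divider comment above picks CDR2 when the requested rate can be reached with a divisor of at most 256, and falls back to the power-of-two CDR1 divider otherwise. The following user-space sketch just works the arithmetic through; ilog2_u32() is a stand-in for the kernel's ilog2(), the example rates are made up, and sun6i differs only in its CDR1 formula (2 ^ cdr).

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static unsigned int ilog2_u32(uint32_t v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t sun4i_spi_clk(uint32_t mclk, uint32_t speed, uint32_t *cdr)
{
	uint32_t div = mclk / (2 * speed);

	if (div <= 256) {
		/* CDR2: SPI_CLK = mclk / (2 * (cdr + 1)) */
		if (div > 0)
			div--;
		*cdr = div;
		return mclk / (2 * (div + 1));
	}
	/* CDR1: SPI_CLK = mclk / 2^(cdr + 1) */
	div = ilog2_u32(mclk) - ilog2_u32(speed);
	*cdr = div;
	return mclk / (1U << (div + 1));
}

int main(void)
{
	uint32_t cdr, hz;

	hz = sun4i_spi_clk(96000000, 4000000, &cdr);	/* CDR2 path: cdr=11, 4 MHz */
	printf("96 MHz mclk, 4 MHz requested:    cdr=%" PRIu32 " -> %" PRIu32 " Hz\n", cdr, hz);

	hz = sun4i_spi_clk(200000000, 300000, &cdr);	/* CDR1 path: cdr=9, ~195 kHz */
	printf("200 MHz mclk, 300 kHz requested: cdr=%" PRIu32 " -> %" PRIu32 " Hz\n", cdr, hz);
	return 0;
}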
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
new file mode 100644
index 00000000000..bd24093f403
--- /dev/null
+++ b/drivers/spi/spi-sun6i.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN6I_FIFO_DEPTH 128
+
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
+#define SUN6I_GBL_CTL_MASTER BIT(1)
+#define SUN6I_GBL_CTL_TP BIT(7)
+#define SUN6I_GBL_CTL_RST BIT(31)
+
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_TFR_CTL_CPHA BIT(0)
+#define SUN6I_TFR_CTL_CPOL BIT(1)
+#define SUN6I_TFR_CTL_SPOL BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK 0x30
+#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
+#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_XCH BIT(31)
+
+#define SUN6I_INT_CTL_REG 0x10
+#define SUN6I_INT_CTL_RF_OVF BIT(8)
+#define SUN6I_INT_CTL_TC BIT(12)
+
+#define SUN6I_INT_STA_REG 0x14
+
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_CTL_RF_RST BIT(15)
+#define SUN6I_FIFO_CTL_TF_RST BIT(31)
+
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_RF_CNT_BITS 0
+#define SUN6I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_TF_CNT_BITS 16
+
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_CLK_CTL_CDR2_MASK 0xff
+#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK 0xf
+#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS BIT(12)
+
+#define SUN6I_BURST_CNT_REG 0x30
+#define SUN6I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_XMIT_CNT_REG 0x34
+#define SUN6I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_BURST_CTL_CNT_REG 0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+struct sun6i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+ struct reset_control *rstc;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+ reg &= SUN6I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN6I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~SUN6I_TFR_CTL_CS_MASK;
+ reg |= SUN6I_TFR_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN6I_TFR_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+}
+
+
+static int sun6i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+	/* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN6I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
+
+ /* Reset FIFO */
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
+ SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN6I_TFR_CTL_CPOL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN6I_TFR_CTL_CPHA;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN6I_TFR_CTL_FBS;
+ else
+ reg &= ~SUN6I_TFR_CTL_FBS;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN6I_TFR_CTL_DHB;
+ else
+ reg |= SUN6I_TFR_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+	 * We have two choices here. Either we can use CDR1, which is
+	 * calculated with this formula:
+	 * SPI_CLK = MOD_CLK / (2 ^ cdr)
+	 * Or we can use CDR2, which is calculated with the formula:
+	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+	 * Whether we use the former or the latter is selected through
+	 * the DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ }
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
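+	/*
+	 * Worked example of the divider selection above (rates are
+	 * hypothetical): with a 24 MHz mclk and a 4 MHz device,
+	 * div = 24000000 / (2 * 4000000) = 3, so CDR2 = 2 and
+	 * SPI_CLK = 24 MHz / (2 * (2 + 1)) = 4 MHz. With a 100 kHz
+	 * device on a 100 MHz mclk, div = 500 exceeds the CDR2 range,
+	 * so CDR1 = ilog2(100 MHz) - ilog2(100 kHz) = 10 and
+	 * SPI_CLK = 100 MHz / 2^10, roughly 97.7 kHz.
+	 */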
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
+ SUN6I_BURST_CTL_CNT_STC(tx_len));
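+	/*
+	 * For example (hypothetical transfers): a 16-byte read with no
+	 * tx_buf leaves tx_len at 0, so BURST_CNT is programmed with 16
+	 * while XMIT_CNT and the STC count are programmed with 0; a
+	 * 16-byte write programs all three counters with 16.
+	 */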
+
+ /* Fill the TX FIFO */
+ sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+out:
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+{
+ struct sun6i_spi *sspi = dev_id;
+ u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN6I_INT_CTL_TC) {
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun6i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(sspi->rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the device from reset\n");
+ goto err2;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+ SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(sspi->mclk);
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun6i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ reset_control_assert(sspi->rstc);
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun6i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
+ 0, "sun6i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun6i_spi_set_cs;
+ master->transfer_one = sun6i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ sspi->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get reset controller\n");
+ ret = PTR_ERR(sspi->rstc);
+ goto err_free_master;
+ }
+
+	/*
+	 * Resume the device here so that it is brought up even when
+	 * runtime PM is disabled.
+	 */
+ ret = sun6i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun6i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+ { .compatible = "allwinner,sun6i-a31-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static const struct dev_pm_ops sun6i_spi_pm_ops = {
+ .runtime_resume = sun6i_spi_runtime_resume,
+ .runtime_suspend = sun6i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun6i_spi_driver = {
+ .probe = sun6i_spi_probe,
+ .remove = sun6i_spi_remove,
+ .driver = {
+ .name = "sun6i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_spi_match,
+ .pm = &sun6i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
new file mode 100644
index 00000000000..e4a85ada861
--- /dev/null
+++ b/drivers/spi/spi-tegra114.c
@@ -0,0 +1,1229 @@
+/*
+ * SPI driver for NVIDIA's Tegra114 SPI Controller.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND1 0x000
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SPI_PACKED (1 << 5)
+#define SPI_TX_EN (1 << 11)
+#define SPI_RX_EN (1 << 12)
+#define SPI_BOTH_EN_BYTE (1 << 13)
+#define SPI_BOTH_EN_BIT (1 << 14)
+#define SPI_LSBYTE_FE (1 << 15)
+#define SPI_LSBIT_FE (1 << 16)
+#define SPI_BIDIROE (1 << 17)
+#define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
+#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
+#define SPI_IDLE_SDA_MASK (3 << 18)
+#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_HW (1 << 21)
+/* SPI_CS_POL_INACTIVE bits default to high; n ranges from 0 to 3 */
+#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
+#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
+
+#define SPI_CS_SEL_0 (0 << 26)
+#define SPI_CS_SEL_1 (1 << 26)
+#define SPI_CS_SEL_2 (2 << 26)
+#define SPI_CS_SEL_3 (3 << 26)
+#define SPI_CS_SEL_MASK (3 << 26)
+#define SPI_CS_SEL(x) (((x) & 0x3) << 26)
+#define SPI_CONTROL_MODE_0 (0 << 28)
+#define SPI_CONTROL_MODE_1 (1 << 28)
+#define SPI_CONTROL_MODE_2 (2 << 28)
+#define SPI_CONTROL_MODE_3 (3 << 28)
+#define SPI_CONTROL_MODE_MASK (3 << 28)
+#define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
+#define SPI_M_S (1 << 30)
+#define SPI_PIO (1 << 31)
+
+#define SPI_COMMAND2 0x004
+#define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
+#define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
+
+#define SPI_CS_TIMING1 0x008
+#define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+#define SPI_CS_SETUP_HOLD(reg, cs, val) \
+ ((((val) & 0xFFu) << ((cs) * 8)) | \
+ ((reg) & ~(0xFFu << ((cs) * 8))))
+
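+/*
+ * Illustrative expansion of SPI_SETUP_HOLD/SPI_CS_SETUP_HOLD above
+ * (values are hypothetical):
+ *
+ *	SPI_SETUP_HOLD(3, 2)			== 0x32
+ *	SPI_CS_SETUP_HOLD(0x11111111, 1, 0x32)	== 0x11113211
+ *
+ * i.e. only bits [15:8] (chip select 1) of SPI_CS_TIMING1 change.
+ */
+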
+#define SPI_CS_TIMING2 0x00C
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
+#define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
+#define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
+#define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
+#define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
+#define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
+#define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
+#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
+ ((reg) & ~(1 << ((cs) * 8 + 5))))
+#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0xF) << ((cs) * 8)) | \
+ ((reg) & ~(0xF << ((cs) * 8))))
+
+#define SPI_TRANS_STATUS 0x010
+#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
+#define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
+#define SPI_RDY (1 << 30)
+
+#define SPI_FIFO_STATUS 0x014
+#define SPI_RX_FIFO_EMPTY (1 << 0)
+#define SPI_RX_FIFO_FULL (1 << 1)
+#define SPI_TX_FIFO_EMPTY (1 << 2)
+#define SPI_TX_FIFO_FULL (1 << 3)
+#define SPI_RX_FIFO_UNF (1 << 4)
+#define SPI_RX_FIFO_OVF (1 << 5)
+#define SPI_TX_FIFO_UNF (1 << 6)
+#define SPI_TX_FIFO_OVF (1 << 7)
+#define SPI_ERR (1 << 8)
+#define SPI_TX_FIFO_FLUSH (1 << 14)
+#define SPI_RX_FIFO_FLUSH (1 << 15)
+#define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
+#define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
+#define SPI_FRAME_END (1 << 30)
+#define SPI_CS_INACTIVE (1 << 31)
+
+#define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
+ SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
+#define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
+
+#define SPI_TX_DATA 0x018
+#define SPI_RX_DATA 0x01C
+
+#define SPI_DMA_CTL 0x020
+#define SPI_TX_TRIG_1 (0 << 15)
+#define SPI_TX_TRIG_4 (1 << 15)
+#define SPI_TX_TRIG_8 (2 << 15)
+#define SPI_TX_TRIG_16 (3 << 15)
+#define SPI_TX_TRIG_MASK (3 << 15)
+#define SPI_RX_TRIG_1 (0 << 19)
+#define SPI_RX_TRIG_4 (1 << 19)
+#define SPI_RX_TRIG_8 (2 << 19)
+#define SPI_RX_TRIG_16 (3 << 19)
+#define SPI_RX_TRIG_MASK (3 << 19)
+#define SPI_IE_TX (1 << 28)
+#define SPI_IE_RX (1 << 29)
+#define SPI_CONT (1 << 30)
+#define SPI_DMA (1 << 31)
+#define SPI_DMA_EN SPI_DMA
+
+#define SPI_DMA_BLK 0x024
+#define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
+
+#define SPI_TX_FIFO 0x108
+#define SPI_RX_FIFO 0x188
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 64
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40)
+#define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0)
+#define MAX_HOLD_CYCLES 16
+#define SPI_DEFAULT_SPEED 25000000
+
+struct tegra_spi_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ struct spi_device *cs_control;
+ unsigned cur_pos;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_spi_runtime_suspend(struct device *dev);
+static int tegra_spi_runtime_resume(struct device *dev);
+
+static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SPI_TX_FIFO)
+ readl(tspi->base + SPI_COMMAND1);
+}
+
+static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
+{
+ u32 val;
+
+ /* Write 1 to clear status register */
+ val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
+ tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
+
+ /* Clear fifo status error if any */
+ val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (val & SPI_ERR)
+ tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
+ SPI_FIFO_STATUS);
+}
+
+static unsigned tegra_spi_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_spi_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word = t->bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
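+
+/*
+ * Example for tegra_spi_calculate_curr_xfer_param() above, with a
+ * hypothetical transfer: 100 bytes at 8 bits_per_word takes the packed
+ * path, giving bytes_per_word = 1, words_per_32bit = 4,
+ * curr_dma_words = 100 and total_fifo_words = 25; since 25 does not
+ * exceed SPI_FIFO_DEPTH (64), the transfer is then done in PIO mode.
+ */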
+
+static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
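+
+/*
+ * Note on the packed path above: bytes are packed little-endian into each
+ * 32-bit FIFO word, so a hypothetical tx_buf of {0x11, 0x22, 0x33, 0x44}
+ * is written to SPI_TX_FIFO as 0x44332211.
+ */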
+
+static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+	/* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+	/* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_spi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u32 status;
+
+ /* Make sure that Rx and Tx fifo are empty */
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
+ return -EIO;
+ }
+
+ val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
+ else
+ val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
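+	/*
+	 * For example (hypothetical lengths): len = 20 is not a multiple
+	 * of 16 and uses the 1-word trigger, len = 16 uses the 4-word
+	 * trigger, and len = 64 uses the 8-word trigger.
+	 */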
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ ret = tegra_spi_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+		/* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_spi_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ tspi->dma_control_reg = val;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return ret;
+}
+
+static int tegra_spi_start_cpu_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+
+ val = SPI_DMA_BLK_SET(cur_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ val = 0;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ u32 command1;
+ int req_mode;
+
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+
+ if (is_first_of_msg) {
+ tegra_spi_clear_status(tspi);
+
+ command1 = tspi->def_command1_reg;
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~SPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_0)
+ command1 |= SPI_CONTROL_MODE_0;
+ else if (req_mode == SPI_MODE_1)
+ command1 |= SPI_CONTROL_MODE_1;
+ else if (req_mode == SPI_MODE_2)
+ command1 |= SPI_CONTROL_MODE_2;
+ else if (req_mode == SPI_MODE_3)
+ command1 |= SPI_CONTROL_MODE_3;
+
+ if (tspi->cs_control) {
+ if (tspi->cs_control != spi)
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->cs_control = NULL;
+ } else
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SS_VAL;
+ else
+ command1 &= ~SPI_CS_SS_VAL;
+
+ tegra_spi_writel(tspi, 0, SPI_COMMAND2);
+ } else {
+ command1 = tspi->command1_reg;
+ command1 &= ~SPI_BIT_LENGTH(~0);
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ return command1;
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, u32 command1)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned total_fifo_words;
+ int ret;
+
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (tspi->is_packed)
+ command1 |= SPI_PACKED;
+
+ command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command1 |= SPI_RX_EN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command1 |= SPI_TX_EN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ command1 |= SPI_CS_SEL(spi->chip_select);
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->command1_reg = command1;
+
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
+
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_spi_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_spi_setup(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command1_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
+ else
+ val |= SPI_CS_POL_INACTIVE(spi->chip_select);
+ tspi->def_command1_reg = val;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static void tegra_spi_transfer_delay(int delay)
+{
+ if (!delay)
+ return;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+
+ udelay(delay % 1000);
+}
+
+static int tegra_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+ bool skip = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ u32 cmd1;
+
+ reinit_completion(&tspi->xfer_completion);
+
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ if (!xfer->len) {
+ ret = 0;
+ skip = true;
+ goto complete_xfer;
+ }
+
+ ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto complete_xfer;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto complete_xfer;
+ }
+ msg->actual_length += xfer->len;
+
+complete_xfer:
+ if (ret < 0 || skip) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ goto exit;
+ } else if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ if (xfer->cs_change)
+ tspi->cs_control = spi;
+ else {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+ } else if (xfer->cs_change) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+
+ }
+ ret = 0;
+exit:
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_spi_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_spi_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_spi_isr(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
+ tegra_spi_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct of_device_id tegra_spi_of_match[] = {
+ { .compatible = "nvidia,tegra114-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
+
+static int tegra_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_spi_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_spi_setup;
+ master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->auto_runtime_pm = true;
+
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_spi_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command1_reg = SPI_M_S;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+ tegra_spi_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_spi_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+
+ if (tspi->tx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+	/* Flush all writes pending in the PPSB queue by reading back */
+ tegra_spi_readl(tspi, SPI_COMMAND1);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
+ tegra_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
+};
+static struct platform_driver tegra_spi_driver = {
+ .driver = {
+ .name = "spi-tegra114",
+ .owner = THIS_MODULE,
+ .pm = &tegra_spi_pm_ops,
+ .of_match_table = tegra_spi_of_match,
+ },
+ .probe = tegra_spi_probe,
+ .remove = tegra_spi_remove,
+};
+module_platform_driver(tegra_spi_driver);
+
+MODULE_ALIAS("platform:spi-tegra114");
+MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
new file mode 100644
index 00000000000..3548ce25c08
--- /dev/null
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -0,0 +1,622 @@
+/*
+ * SPI driver for NVIDIA's Tegra20 Serial Flash Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND 0x000
+#define SPI_GO BIT(30)
+#define SPI_M_S BIT(28)
+#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
+#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
+#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
+
+#define SPI_CK_SDA_FALLING (1 << 21)
+#define SPI_CK_SDA_RISING (0 << 21)
+#define SPI_CK_SDA_MASK (1 << 21)
+#define SPI_ACTIVE_SDA (0x3 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
+#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
+
+#define SPI_CS_POL_INVERT BIT(16)
+#define SPI_TX_EN BIT(15)
+#define SPI_RX_EN BIT(14)
+#define SPI_CS_VAL_HIGH BIT(13)
+#define SPI_CS_VAL_LOW 0x0
+#define SPI_CS_SW BIT(12)
+#define SPI_CS_HW 0x0
+#define SPI_CS_DELAY_MASK (7 << 9)
+#define SPI_CS3_EN BIT(8)
+#define SPI_CS2_EN BIT(7)
+#define SPI_CS1_EN BIT(6)
+#define SPI_CS0_EN BIT(5)
+
+#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
+ SPI_CS1_EN | SPI_CS0_EN)
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+
+#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
+
+#define SPI_STATUS 0x004
+#define SPI_BSY BIT(31)
+#define SPI_RDY BIT(30)
+#define SPI_TXF_FLUSH BIT(29)
+#define SPI_RXF_FLUSH BIT(28)
+#define SPI_RX_UNF BIT(27)
+#define SPI_TX_OVF BIT(26)
+#define SPI_RXF_EMPTY BIT(25)
+#define SPI_RXF_FULL BIT(24)
+#define SPI_TXF_EMPTY BIT(23)
+#define SPI_TXF_FULL BIT(22)
+#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
+
+#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
+#define SPI_FIFO_EMPTY			(SPI_TXF_EMPTY | SPI_RXF_EMPTY)
+
+#define SPI_RX_CMP 0x8
+#define SPI_DMA_CTL 0x0C
+#define SPI_DMA_EN BIT(31)
+#define SPI_IE_RXC BIT(27)
+#define SPI_IE_TXC BIT(26)
+#define SPI_PACKED BIT(20)
+#define SPI_RX_TRIG_MASK (0x3 << 18)
+#define SPI_RX_TRIG_1W (0x0 << 18)
+#define SPI_RX_TRIG_4W (0x1 << 18)
+#define SPI_TX_TRIG_MASK (0x3 << 16)
+#define SPI_TX_TRIG_1W (0x0 << 16)
+#define SPI_TX_TRIG_4W (0x1 << 16)
+#define SPI_DMA_BLK_COUNT(count)	(((count) - 1) & 0xFFFF)
+
+#define SPI_TX_FIFO 0x10
+#define SPI_RX_FIFO 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 4
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct tegra_sflash_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned bytes_per_word;
+ unsigned cur_direction;
+ unsigned curr_xfer_words;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+
+ u32 def_command_reg;
+ u32 command_reg;
+ u32 dma_control_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+};
+
+static int tegra_sflash_runtime_suspend(struct device *dev);
+static int tegra_sflash_runtime_resume(struct device *dev);
+
+static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
+ unsigned long reg)
+{
+ return readl(tsd->base + reg);
+}
+
+static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
+ u32 val, unsigned long reg)
+{
+ writel(val, tsd->base + reg);
+}
+
+static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
+{
+ /* Write 1 to clear status register */
+ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
+}
+
+static unsigned tegra_sflash_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_sflash_data *tsd,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tsd->cur_pos;
+ unsigned max_word;
+
+ tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
+ max_word = remain_len / tsd->bytes_per_word;
+ if (max_word > SPI_FIFO_DEPTH)
+ max_word = SPI_FIFO_DEPTH;
+ tsd->curr_xfer_words = max_word;
+ return max_word;
+}
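+
+/*
+ * Example for tegra_sflash_calculate_curr_xfer_param() above, with a
+ * hypothetical transfer: 10 bytes at 8 bits_per_word gives
+ * bytes_per_word = 1 and max_word = 10, clamped to the 4-word FIFO
+ * depth, so the transfer completes in chunks of 4, 4 and 2 words driven
+ * from the interrupt handler.
+ */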
+
+static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ u32 status;
+ unsigned max_n_32bit = tsd->curr_xfer_words;
+ u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
+
+ if (max_n_32bit > SPI_FIFO_DEPTH)
+ max_n_32bit = SPI_FIFO_DEPTH;
+ nbytes = max_n_32bit * tsd->bytes_per_word;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_TXF_FULL)) {
+ int i;
+ u32 x = 0;
+
+ for (i = 0; nbytes && (i < tsd->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
+ if (!nbytes)
+ break;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
+ return max_n_32bit;
+}
+
+static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 status;
+ unsigned int read_words = 0;
+ u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_RXF_EMPTY)) {
+ int i;
+ u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+ for (i = 0; (i < tsd->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ read_words++;
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
+ return 0;
+}
+
+static int tegra_sflash_start_cpu_based_transfer(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 val = 0;
+ unsigned cur_words;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TXC;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RXC;
+
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
+ else
+ cur_words = tsd->curr_xfer_words;
+ val |= SPI_DMA_BLK_COUNT(cur_words);
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+ val |= SPI_DMA_EN;
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_sflash_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u32 command;
+
+ speed = t->speed_hz;
+ if (speed != tsd->cur_speed) {
+ clk_set_rate(tsd->clk, speed);
+ tsd->cur_speed = speed;
+ }
+
+ tsd->cur_spi = spi;
+ tsd->cur_pos = 0;
+ tsd->cur_rx_pos = 0;
+ tsd->cur_tx_pos = 0;
+ tsd->curr_xfer = t;
+ tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
+ if (is_first_of_msg) {
+ command = tsd->def_command_reg;
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command |= SPI_CS_VAL_HIGH;
+
+ command &= ~SPI_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SPI_CK_SDA_FALLING;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
+ else
+ command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
+ command |= SPI_CS0_EN << spi->chip_select;
+ } else {
+ command = tsd->command_reg;
+ command &= ~SPI_BIT_LENGTH(~0);
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command &= ~(SPI_RX_EN | SPI_TX_EN);
+ }
+
+ tsd->cur_direction = 0;
+ if (t->rx_buf) {
+ command |= SPI_RX_EN;
+ tsd->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command |= SPI_TX_EN;
+ tsd->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_sflash_writel(tsd, command, SPI_COMMAND);
+ tsd->command_reg = command;
+
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
+}
+
+static int tegra_sflash_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ reinit_completion(&tsd->xfer_completion);
+ ret = tegra_sflash_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tsd->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tsd->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tsd->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tsd->tx_status || tsd->rx_status) {
+ dev_err(tsd->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_sflash_writel(tsd, tsd->def_command_reg,
+ SPI_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
+{
+ struct spi_transfer *t = tsd->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsd->lock, flags);
+ if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
+ dev_err(tsd->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
+ dev_err(tsd->dev,
+ "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
+ tsd->dma_control_reg);
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->cur_pos = tsd->cur_tx_pos;
+ else
+ tsd->cur_pos = tsd->cur_rx_pos;
+
+ if (tsd->cur_pos == t->len) {
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
+ tegra_sflash_start_cpu_based_transfer(tsd, t);
+exit:
+ spin_unlock_irqrestore(&tsd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
+{
+ struct tegra_sflash_data *tsd = context_data;
+
+ tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
+ tegra_sflash_clear_status(tsd);
+
+ return handle_cpu_based_xfer(tsd);
+}
+
+static const struct of_device_id tegra_sflash_of_match[] = {
+ { .compatible = "nvidia,tegra20-sflash", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
+
+static int tegra_sflash_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_sflash_data *tsd;
+ struct resource *r;
+ int ret;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_sflash_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tsd = spi_master_get_devdata(master);
+ tsd->master = master;
+ tsd->dev = &pdev->dev;
+ spin_lock_init(&tsd->lock);
+
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tsd->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tsd->base)) {
+ ret = PTR_ERR(tsd->base);
+ goto exit_free_master;
+ }
+
+ tsd->irq = platform_get_irq(pdev, 0);
+ ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ dev_name(&pdev->dev), tsd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tsd->irq);
+ goto exit_free_master;
+ }
+
+ tsd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tsd->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tsd->clk);
+ goto exit_free_irq;
+ }
+
+ tsd->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tsd->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tsd->rst);
+ goto exit_free_irq;
+ }
+
+ init_completion(&tsd->xfer_completion);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_sflash_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ /* Reset controller */
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+
+ tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+exit_free_irq:
+ free_irq(tsd->irq, tsd);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_sflash_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ free_irq(tsd->irq, tsd);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_sflash_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_sflash_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_sflash_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ /* Flush all writes in the PPSB queue by reading them back */
+ tegra_sflash_readl(tsd, SPI_COMMAND);
+
+ clk_disable_unprepare(tsd->clk);
+ return 0;
+}
+
+static int tegra_sflash_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tsd->clk);
+ if (ret < 0) {
+ dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
+ tegra_sflash_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
+};
+static struct platform_driver tegra_sflash_driver = {
+ .driver = {
+ .name = "spi-tegra-sflash",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_sflash_of_match,
+ },
+ .probe = tegra_sflash_probe,
+ .remove = tegra_sflash_remove,
+};
+module_platform_driver(tegra_sflash_driver);
+
+MODULE_ALIAS("platform:spi-tegra-sflash");
+MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 00000000000..0b9e32e9f49
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1239 @@
+/*
+ * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
+ SLINK_TX_UNF | SLINK_RX_OVF)
+
+#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 32
+
+struct tegra_slink_chip_data {
+ bool cs_hold_time;
+};
+
+struct tegra_slink_data {
+ struct device *dev;
+ struct spi_master *master;
+ const struct tegra_slink_chip_data *chip_data;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ u32 packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_slink_runtime_suspend(struct device *dev);
+static int tegra_slink_runtime_resume(struct device *dev);
+
+static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SLINK_TX_FIFO)
+ readl(tspi->base + SLINK_MAS_DATA);
+}
+
+static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
+{
+ u32 val_write;
+
+ tegra_slink_readl(tspi, SLINK_STATUS);
+
+ /* Write 1 to clear status register */
+ val_write = SLINK_RDY | SLINK_FIFO_ERROR;
+ tegra_slink_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ switch (tspi->bytes_per_word) {
+ case 0:
+ return SLINK_PACK_SIZE_4;
+ case 1:
+ return SLINK_PACK_SIZE_8;
+ case 2:
+ return SLINK_PACK_SIZE_16;
+ case 4:
+ return SLINK_PACK_SIZE_32;
+ default:
+ return 0;
+ }
+}
+
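+/*
+ * Work out how much of the remaining transfer fits in the next chunk.
+ * 8- and 16-bit words are packed into 32-bit FIFO entries; the returned
+ * FIFO word count decides between PIO and DMA.
+ */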
+static unsigned tegra_slink_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = max_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ /* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_slink_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
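+/*
+ * DMA path: program the block size and FIFO trigger levels, stage the TX
+ * data in the bounce buffer, start the dmaengine channels and finally set
+ * SLINK_DMA_EN to kick off the transfer.
+ */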
+static int tegra_slink_start_dma_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u32 status;
+
+ /* Make sure that Rx and Tx fifo are empty */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
+ return -EIO;
+ }
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ ret = tegra_slink_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for the TX FIFO to fill before starting the SLINK transfer */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(status & SLINK_TX_FULL))
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Hand the DMA buffer back to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_slink_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ /* HW needs a small delay after setting packed mode */
+ udelay(1);
+ }
+ tspi->dma_control_reg = val;
+
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
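+/*
+ * PIO path: prime the TX FIFO directly from the client buffer and start
+ * the transfer; completion and refills are driven from the interrupt
+ * handler.
+ */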
+static int tegra_slink_start_cpu_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ tspi->dma_control_reg = val;
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
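+/*
+ * Request a dmaengine channel and a coherent bounce buffer for one
+ * direction and point it at the SLINK TX or RX FIFO.
+ */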
+static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
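+/*
+ * Set the clock rate and word length for this transfer, enable the
+ * required directions and pick DMA or PIO based on the FIFO depth.
+ */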
+static int tegra_slink_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ u32 command;
+ u32 command2;
+
+ bits_per_word = t->bits_per_word;
+ speed = t->speed_hz;
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
+
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
+
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ ret = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_slink_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_slink_setup(struct spi_device *spi)
+{
+ static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_pol_bit[spi->chip_select];
+ else
+ val &= ~cs_pol_bit[spi->chip_select];
+ tspi->def_command_reg = val;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ tegra_slink_clear_status(tspi);
+
+ tspi->command_reg = tspi->def_command_reg;
+ tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
+
+ tspi->command2_reg = tspi->def_command2_reg;
+ tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);
+
+ tspi->command_reg &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ tspi->command_reg |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ return 0;
+}
+
+static int tegra_slink_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ reinit_completion(&tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ return -EIO;
+ }
+
+ if (tspi->tx_status)
+ return tspi->tx_status;
+ if (tspi->rx_status)
+ return tspi->rx_status;
+
+ return 0;
+}
+
+static int tegra_slink_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+
+ return 0;
+}
+
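+/*
+ * Threaded-IRQ handler for PIO transfers: reset the controller on error,
+ * otherwise drain the RX FIFO and start the next chunk until the whole
+ * transfer has been moved.
+ */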
+static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(tspi->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_slink_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
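+/*
+ * Threaded-IRQ handler for DMA transfers: wait for the dmaengine
+ * completions, abort and reset on error, copy RX data back to the client
+ * buffer and continue with the remainder of the transfer.
+ */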
+static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev,
+ "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ err = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_slink_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
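+/*
+ * Hard IRQ handler: latch the status and error bits, clear them and defer
+ * the real work to the threaded handler.
+ */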
+static irqreturn_t tegra_slink_isr(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ tegra_slink_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct tegra_slink_chip_data tegra30_spi_cdata = {
+ .cs_hold_time = true,
+};
+
+static const struct tegra_slink_chip_data tegra20_spi_cdata = {
+ .cs_hold_time = false,
+};
+
+static const struct of_device_id tegra_slink_of_match[] = {
+ { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
+ { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
+
+static int tegra_slink_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_slink_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+ const struct tegra_slink_chip_data *cdata = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_slink_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_slink_setup;
+ master->prepare_message = tegra_slink_prepare_message;
+ master->transfer_one = tegra_slink_transfer_one;
+ master->unprepare_message = tegra_slink_unprepare_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ tspi->chip_data = cdata;
+ spin_lock_init(&tspi->lock);
+
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_slink_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command_reg = SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+ tegra_slink_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_slink_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_slink_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_slink_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_slink_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_slink_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes in the PPSB queue by reading them back */
+ tegra_slink_readl(tspi, SLINK_MAS_DATA);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_slink_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+ tegra_slink_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
+};
+static struct platform_driver tegra_slink_driver = {
+ .driver = {
+ .name = "spi-tegra-slink",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_slink_of_match,
+ },
+ .probe = tegra_slink_probe,
+ .remove = tegra_slink_remove,
+};
+module_platform_driver(tegra_slink_driver);
+
+MODULE_ALIAS("platform:spi-tegra-slink");
+MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
new file mode 100644
index 00000000000..6c211d1910b
--- /dev/null
+++ b/drivers/spi/spi-ti-qspi.c
@@ -0,0 +1,581 @@
+/*
+ * TI QSPI driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sourav Poddar <sourav.poddar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GPLv2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <linux/spi/spi.h>
+
+struct ti_qspi_regs {
+ u32 clkctrl;
+};
+
+struct ti_qspi {
+ struct completion transfer_complete;
+
+ /* list synchronization */
+ struct mutex list_lock;
+
+ struct spi_master *master;
+ void __iomem *base;
+ void __iomem *ctrl_base;
+ void __iomem *mmap_base;
+ struct clk *fclk;
+ struct device *dev;
+
+ struct ti_qspi_regs ctx_reg;
+
+ u32 spi_max_frequency;
+ u32 cmd;
+ u32 dc;
+
+ bool ctrl_mod;
+};
+
+#define QSPI_PID (0x0)
+#define QSPI_SYSCONFIG (0x10)
+#define QSPI_INTR_STATUS_RAW_SET (0x20)
+#define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24)
+#define QSPI_INTR_ENABLE_SET_REG (0x28)
+#define QSPI_INTR_ENABLE_CLEAR_REG (0x2c)
+#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
+#define QSPI_SPI_DC_REG (0x44)
+#define QSPI_SPI_CMD_REG (0x48)
+#define QSPI_SPI_STATUS_REG (0x4c)
+#define QSPI_SPI_DATA_REG (0x50)
+#define QSPI_SPI_SETUP0_REG (0x54)
+#define QSPI_SPI_SWITCH_REG (0x64)
+#define QSPI_SPI_SETUP1_REG (0x58)
+#define QSPI_SPI_SETUP2_REG (0x5c)
+#define QSPI_SPI_SETUP3_REG (0x60)
+#define QSPI_SPI_DATA_REG_1 (0x68)
+#define QSPI_SPI_DATA_REG_2 (0x6c)
+#define QSPI_SPI_DATA_REG_3 (0x70)
+
+#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+#define QSPI_FCLK 192000000
+
+/* Clock Control */
+#define QSPI_CLK_EN (1 << 31)
+#define QSPI_CLK_DIV_MAX 0xffff
+
+/* Command */
+#define QSPI_EN_CS(n) (n << 28)
+#define QSPI_WLEN(n) ((n - 1) << 19)
+#define QSPI_3_PIN (1 << 18)
+#define QSPI_RD_SNGL (1 << 16)
+#define QSPI_WR_SNGL (2 << 16)
+#define QSPI_RD_DUAL (3 << 16)
+#define QSPI_RD_QUAD (7 << 16)
+#define QSPI_INVAL (4 << 16)
+#define QSPI_WC_CMD_INT_EN (1 << 14)
+#define QSPI_FLEN(n) ((n - 1) << 0)
+
+/* STATUS REGISTER */
+#define WC 0x02
+
+/* INTERRUPT REGISTER */
+#define QSPI_WC_INT_EN (1 << 1)
+#define QSPI_WC_INT_DISABLE (1 << 1)
+
+/* Device Control */
+#define QSPI_DD(m, n) (m << (3 + n * 8))
+#define QSPI_CKPHA(n) (1 << (2 + n * 8))
+#define QSPI_CSPOL(n) (1 << (1 + n * 8))
+#define QSPI_CKPOL(n) (1 << (n * 8))
+
+#define QSPI_FRAME 4096
+
+#define QSPI_AUTOSUSPEND_TIMEOUT 2000
+
+static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
+ unsigned long reg)
+{
+ return readl(qspi->base + reg);
+}
+
+static inline void ti_qspi_write(struct ti_qspi *qspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, qspi->base + reg);
+}
+
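+/*
+ * Derive the QSPI clock divider from the functional clock and
+ * spi-max-frequency, then reprogram the clock control register.
+ */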
+static int ti_qspi_setup(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+ int clk_div = 0, ret;
+ u32 clk_ctrl_reg, clk_rate, clk_mask;
+
+ if (spi->master->busy) {
+ dev_dbg(qspi->dev, "master busy doing other trasnfers\n");
+ return -EBUSY;
+ }
+
+ if (!qspi->spi_max_frequency) {
+ dev_err(qspi->dev, "spi max frequency not defined\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_get_rate(qspi->fclk);
+
+ clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
+
+ if (clk_div < 0) {
+ dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
+ return -EINVAL;
+ }
+
+ if (clk_div > QSPI_CLK_DIV_MAX) {
+ dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
+ QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
+ return -EINVAL;
+ }
+
+ dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
+ qspi->spi_max_frequency, clk_div);
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ clk_ctrl_reg &= ~QSPI_CLK_EN;
+
+ /* disable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ /* enable SCLK */
+ clk_mask = QSPI_CLK_EN | clk_div;
+ ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
+ ctx_reg->clkctrl = clk_mask;
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ ret = pm_runtime_put_autosuspend(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+
+ ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
+}
+
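+/*
+ * Write path: push one word at a time into the data register, issue the
+ * command and wait for the word-complete interrupt before moving on.
+ */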
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count, ret;
+ unsigned int cmd;
+ const u8 *txbuf;
+
+ txbuf = t->tx_buf;
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ count = t->len;
+ wlen = t->bits_per_word >> 3; /* in bytes */
+
+ while (count) {
+ switch (wlen) {
+ case 1:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
+ cmd, qspi->dc, *txbuf);
+ writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 2:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
+ cmd, qspi->dc, *txbuf);
+ writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
+ cmd, qspi->dc, *txbuf);
+ writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += wlen;
+ count -= wlen;
+ }
+
+ return 0;
+}
+
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count, ret;
+ unsigned int cmd;
+ u8 *rxbuf;
+
+ rxbuf = t->rx_buf;
+ cmd = qspi->cmd;
+ switch (t->rx_nbits) {
+ case SPI_NBITS_DUAL:
+ cmd |= QSPI_RD_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ cmd |= QSPI_RD_QUAD;
+ break;
+ default:
+ cmd |= QSPI_RD_SNGL;
+ break;
+ }
+ count = t->len;
+ wlen = t->bits_per_word >> 3; /* in bytes */
+
+ while (count) {
+ dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "read timed out\n");
+ return -ETIMEDOUT;
+ }
+ switch (wlen) {
+ case 1:
+ *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 2:
+ *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+ rxbuf += wlen;
+ count -= wlen;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ ret = qspi_write_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while writing\n");
+ return ret;
+ }
+ }
+
+ if (t->rx_buf) {
+ ret = qspi_read_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while reading\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
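+/*
+ * Per-message handler: program the device control and frame length
+ * registers, then walk the transfer list doing interrupt-paced,
+ * word-by-word writes and reads.
+ */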
+static int ti_qspi_start_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0, ret;
+ int frame_length;
+
+ /* setup device control reg */
+ qspi->dc = 0;
+
+ if (spi->mode & SPI_CPHA)
+ qspi->dc |= QSPI_CKPHA(spi->chip_select);
+ if (spi->mode & SPI_CPOL)
+ qspi->dc |= QSPI_CKPOL(spi->chip_select);
+ if (spi->mode & SPI_CS_HIGH)
+ qspi->dc |= QSPI_CSPOL(spi->chip_select);
+
+ frame_length = (m->frame_length << 3) / spi->bits_per_word;
+
+ frame_length = clamp(frame_length, 0, QSPI_FRAME);
+
+ /* setup command reg */
+ qspi->cmd = 0;
+ qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+ qspi->cmd |= QSPI_FLEN(frame_length);
+ qspi->cmd |= QSPI_WC_CMD_INT_EN;
+
+ ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
+ ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+
+ mutex_lock(&qspi->list_lock);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+
+ ret = qspi_transfer_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "transfer message failed\n");
+ mutex_unlock(&qspi->list_lock);
+ return -EINVAL;
+ }
+
+ m->actual_length += t->len;
+ }
+
+ mutex_unlock(&qspi->list_lock);
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+
+ return status;
+}
+
+static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
+{
+ struct ti_qspi *qspi = dev_id;
+ u16 int_stat;
+ u32 stat;
+
+ irqreturn_t ret = IRQ_HANDLED;
+
+ int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+
+ if (!int_stat) {
+ dev_dbg(qspi->dev, "No IRQ triggered\n");
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
+ QSPI_INTR_STATUS_ENABLED_CLEAR);
+ if (stat & WC)
+ complete(&qspi->transfer_complete);
+out:
+ return ret;
+}
+
+static int ti_qspi_runtime_resume(struct device *dev)
+{
+ struct ti_qspi *qspi;
+
+ qspi = dev_get_drvdata(dev);
+ ti_qspi_restore_ctx(qspi);
+
+ return 0;
+}
+
+static const struct of_device_id ti_qspi_match[] = {
+ {.compatible = "ti,dra7xxx-qspi" },
+ {.compatible = "ti,am4372-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_qspi_match);
+
+static int ti_qspi_probe(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi;
+ struct spi_master *master;
+ struct resource *r, *res_ctrl, *res_mmap;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+ int ret = 0, num_cs, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_qspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = ti_qspi_start_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+
+ qspi = spi_master_get_devdata(master);
+ qspi->master = master;
+ qspi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, qspi);
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ if (r == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ res_mmap = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_mmap");
+ if (res_mmap == NULL) {
+ res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mmap == NULL) {
+ dev_err(&pdev->dev,
+ "memory mapped resource not required\n");
+ }
+ }
+
+ res_ctrl = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_ctrlmod");
+ if (res_ctrl == NULL) {
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res_ctrl == NULL) {
+ dev_dbg(&pdev->dev,
+ "control module resources not required\n");
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq;
+ }
+
+ mutex_init(&qspi->list_lock);
+
+ qspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(qspi->base)) {
+ ret = PTR_ERR(qspi->base);
+ goto free_master;
+ }
+
+ if (res_ctrl) {
+ qspi->ctrl_mod = true;
+ qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ }
+
+ if (res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ ret = PTR_ERR(qspi->mmap_base);
+ goto free_master;
+ }
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
+ dev_name(&pdev->dev), qspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ irq);
+ goto free_master;
+ }
+
+ qspi->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(qspi->fclk)) {
+ ret = PTR_ERR(qspi->fclk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ }
+
+ init_completion(&qspi->transfer_complete);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ qspi->spi_max_frequency = max_freq;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto free_master;
+
+ return 0;
+
+free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int ti_qspi_remove(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
+
+ pm_runtime_put(qspi->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_qspi_pm_ops = {
+ .runtime_resume = ti_qspi_runtime_resume,
+};
+
+static struct platform_driver ti_qspi_driver = {
+ .probe = ti_qspi_probe,
+ .remove = ti_qspi_remove,
+ .driver = {
+ .name = "ti-qspi",
+ .owner = THIS_MODULE,
+ .pm = &ti_qspi_pm_ops,
+ .of_match_table = ti_qspi_match,
+ }
+};
+
+module_platform_driver(ti_qspi_driver);
+
+MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QSPI controller driver");
+MODULE_ALIAS("platform:ti-qspi");
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
deleted file mode 100644
index 3f6f6e81c65..00000000000
--- a/drivers/spi/spi-ti-ssp.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Sequencer Serial Port (SSP) based SPI master driver
- *
- * Copyright (C) 2010 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ti_ssp.h>
-
-#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
-
-struct ti_ssp_spi {
- struct spi_master *master;
- struct device *dev;
- spinlock_t lock;
- struct list_head msg_queue;
- struct completion complete;
- bool shutdown;
- struct workqueue_struct *workqueue;
- struct work_struct work;
- u8 mode, bpw;
- int cs_active;
- u32 pc_en, pc_dis, pc_wr, pc_rd;
- void (*select)(int cs);
-};
-
-static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
-{
- u32 ret;
-
- ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret);
- return ret;
-}
-
-static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
-{
- ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL);
-}
-
-static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
- struct spi_transfer *t)
-{
- int count;
-
- if (hw->bpw <= 8) {
- u8 *rx = t->rx_buf;
- const u8 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 1) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else if (hw->bpw <= 16) {
- u16 *rx = t->rx_buf;
- const u16 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 2) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else {
- u32 *rx = t->rx_buf;
- const u32 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 4) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- }
-
- msg->actual_length += count; /* bytes transferred */
-
- dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
- t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
- hw->bpw, count, (count < t->len) ? " (under)" : "");
-
- return (count < t->len) ? -EIO : 0; /* left over data */
-}
-
-static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
-{
- cs_active = !!cs_active;
- if (cs_active == hw->cs_active)
- return;
- ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL);
- hw->cs_active = cs_active;
-}
-
-#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-
-static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
-{
- int error, idx = 0;
- u32 seqram[16];
- u32 cs_en, cs_dis, clk;
- u32 topbits, botbits;
-
- mode &= MODE_BITS;
- if (mode == hw->mode && bpw == hw->bpw)
- return 0;
-
- cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
- cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH;
- clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW;
-
- /* Construct instructions */
-
- /* Disable Chip Select */
- hw->pc_dis = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk;
-
- /* Enable Chip Select */
- hw->pc_en = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Reads and writes need to be split for bpw > 16 */
- topbits = (bpw > 16) ? 16 : bpw;
- botbits = bpw - topbits;
-
- /* Write */
- hw->pc_wr = idx;
- seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
- if (botbits)
- seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Read */
- hw->pc_rd = idx;
- if (botbits)
- seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
- seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- error = ti_ssp_load(hw->dev, 0, seqram, idx);
- if (error < 0)
- return error;
-
- error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
- 0 : SSP_EARLY_DIN));
- if (error < 0)
- return error;
-
- hw->bpw = bpw;
- hw->mode = mode;
-
- return error;
-}
-
-static void ti_ssp_spi_work(struct work_struct *work)
-{
- struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);
-
- spin_lock(&hw->lock);
-
- while (!list_empty(&hw->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int status = 0;
-
- m = container_of(hw->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
-
- spin_unlock(&hw->lock);
-
- spi = m->spi;
-
- if (hw->select)
- hw->select(spi->chip_select);
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- int bpw = spi->bits_per_word;
- int xfer_status;
-
- if (t->bits_per_word)
- bpw = t->bits_per_word;
-
- if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
- break;
-
- ti_ssp_spi_chip_select(hw, 1);
-
- xfer_status = ti_ssp_spi_txrx(hw, m, t);
- if (xfer_status < 0)
- status = xfer_status;
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (t->cs_change)
- ti_ssp_spi_chip_select(hw, 0);
- }
-
- ti_ssp_spi_chip_select(hw, 0);
- m->status = status;
- m->complete(m->context);
-
- spin_lock(&hw->lock);
- }
-
- if (hw->shutdown)
- complete(&hw->complete);
-
- spin_unlock(&hw->lock);
-}
-
-static int ti_ssp_spi_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word > 32)
- return -EINVAL;
-
- return 0;
-}
-
-static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct ti_ssp_spi *hw;
- struct spi_transfer *t;
- int error = 0;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- hw = spi_master_get_devdata(spi->master);
-
- if (list_empty(&m->transfers) || !m->complete)
- return -EINVAL;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->len && !(t->rx_buf || t->tx_buf)) {
- dev_err(&spi->dev, "invalid xfer, no buffer\n");
- return -EINVAL;
- }
-
- if (t->len && t->rx_buf && t->tx_buf) {
- dev_err(&spi->dev, "invalid xfer, full duplex\n");
- return -EINVAL;
- }
-
- if (t->bits_per_word > 32) {
- dev_err(&spi->dev, "invalid xfer width %d\n",
- t->bits_per_word);
- return -EINVAL;
- }
- }
-
- spin_lock(&hw->lock);
- if (hw->shutdown) {
- error = -ESHUTDOWN;
- goto error_unlock;
- }
- list_add_tail(&m->queue, &hw->msg_queue);
- queue_work(hw->workqueue, &hw->work);
-error_unlock:
- spin_unlock(&hw->lock);
- return error;
-}
-
-static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
-{
- const struct ti_ssp_spi_data *pdata;
- struct ti_ssp_spi *hw;
- struct spi_master *master;
- struct device *dev = &pdev->dev;
- int error = 0;
-
- pdata = dev->platform_data;
- if (!pdata) {
- dev_err(dev, "platform data not found\n");
- return -EINVAL;
- }
-
- master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
- if (!master) {
- dev_err(dev, "cannot allocate SPI master\n");
- return -ENOMEM;
- }
-
- hw = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, hw);
-
- hw->master = master;
- hw->dev = dev;
- hw->select = pdata->select;
-
- spin_lock_init(&hw->lock);
- init_completion(&hw->complete);
- INIT_LIST_HEAD(&hw->msg_queue);
- INIT_WORK(&hw->work, ti_ssp_spi_work);
-
- hw->workqueue = create_singlethread_workqueue(dev_name(dev));
- if (!hw->workqueue) {
- error = -ENOMEM;
- dev_err(dev, "work queue creation failed\n");
- goto error_wq;
- }
-
- error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
- if (error < 0) {
- dev_err(dev, "io setup failed\n");
- goto error_iosel;
- }
-
- master->bus_num = pdev->id;
- master->num_chipselect = pdata->num_cs;
- master->mode_bits = MODE_BITS;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->setup = ti_ssp_spi_setup;
- master->transfer = ti_ssp_spi_transfer;
-
- error = spi_register_master(master);
- if (error) {
- dev_err(dev, "master registration failed\n");
- goto error_reg;
- }
-
- return 0;
-
-error_reg:
-error_iosel:
- destroy_workqueue(hw->workqueue);
-error_wq:
- spi_master_put(master);
- return error;
-}
-
-static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
-{
- struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
- int error;
-
- hw->shutdown = 1;
- while (!list_empty(&hw->msg_queue)) {
- error = wait_for_completion_interruptible(&hw->complete);
- if (error < 0) {
- hw->shutdown = 0;
- return error;
- }
- }
- destroy_workqueue(hw->workqueue);
- spi_unregister_master(hw->master);
-
- return 0;
-}
-
-static struct platform_driver ti_ssp_spi_driver = {
- .probe = ti_ssp_spi_probe,
- .remove = __devexit_p(ti_ssp_spi_remove),
- .driver = {
- .name = "ti-ssp-spi",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_ssp_spi_driver);
-
-MODULE_DESCRIPTION("SSP SPI Master");
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ti-ssp-spi");
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 24421024dea..daf5aa1c24c 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -52,8 +52,7 @@ static inline int tle62x0_write(struct tle62x0_state *st)
buff[1] = gpio_state;
}
- dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
- buff[0], buff[1], buff[2]);
+ dev_dbg(&st->us->dev, "buff %3ph\n", buff);
return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
}
@@ -240,24 +239,22 @@ static int to_gpio_num(struct device_attribute *attr)
return -1;
}
-static int __devinit tle62x0_probe(struct spi_device *spi)
+static int tle62x0_probe(struct spi_device *spi)
{
struct tle62x0_state *st;
struct tle62x0_pdata *pdata;
int ptr;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (pdata == NULL) {
dev_err(&spi->dev, "no device data specified\n");
return -EINVAL;
}
st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
- if (st == NULL) {
- dev_err(&spi->dev, "no memory for device state\n");
+ if (st == NULL)
return -ENOMEM;
- }
st->us = spi;
st->nr_gpio = pdata->gpio_count;
@@ -294,7 +291,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return ret;
}
-static int __devexit tle62x0_remove(struct spi_device *spi)
+static int tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
@@ -313,7 +310,7 @@ static struct spi_driver tle62x0_driver = {
.owner = THIS_MODULE,
},
.probe = tle62x0_probe,
- .remove = __devexit_p(tle62x0_remove),
+ .remove = tle62x0_remove,
};
module_spi_driver(tle62x0_driver);
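The %3ph change above uses the kernel's printk hex-dump specifier, which replaces hand-rolled "%02x,%02x,..." format strings for small buffers. A minimal sketch of the idea follows; the helper, device pointer and buffer contents are illustrative, not part of the patch:

	#include <linux/device.h>

	static void log_buf(struct device *dev)		/* hypothetical helper */
	{
		u8 buff[3] = { 0x12, 0x34, 0x56 };

		/* %*ph dumps up to 64 bytes as space-separated hex; the count
		 * in the specifier (here 3) is the number of bytes printed. */
		dev_dbg(dev, "buff %3ph\n", buff);	/* logs: buff 12 34 56 */
	}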
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 135f7406f4b..f05abf89c06 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -217,7 +217,7 @@ struct pch_pd_dev_save {
struct pch_spi_board_data *board_dat;
};
-static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
+static const struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
@@ -332,7 +332,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->transfer_active = false;
wake_up(&data->wait);
} else {
- dev_err(&data->master->dev,
+ dev_vdbg(&data->master->dev,
"%s : Transfer is not completed",
__func__);
}
@@ -367,7 +367,7 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
if (reg_spsr_val & SPSR_ORF_BIT) {
dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
- if (data->current_msg->complete != 0) {
+ if (data->current_msg->complete) {
data->transfer_complete = true;
data->current_msg->status = -EIO;
data->current_msg->complete(data->current_msg->context);
@@ -464,31 +464,6 @@ static void pch_spi_reset(struct spi_master *master)
pch_spi_writereg(master, PCH_SRST, 0x0);
}
-static int pch_spi_setup(struct spi_device *pspi)
-{
- /* check bits per word */
- if (pspi->bits_per_word == 0) {
- pspi->bits_per_word = 8;
- dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
- }
-
- if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
- dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
- return -EINVAL;
- }
-
- /* Check baud rate setting */
- /* if baud rate of chip is greater than
- max we can support,return error */
- if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
- pspi->max_speed_hz = PCH_MAX_BAUDRATE;
-
- dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
- (pspi->mode) & (SPI_CPOL | SPI_CPHA));
-
- return 0;
-}
-
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
@@ -497,23 +472,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
int retval;
unsigned long flags;
- /* validate spi message and baud rate */
- if (unlikely(list_empty(&pmsg->transfers) == 1)) {
- dev_err(&pspi->dev, "%s list empty\n", __func__);
- retval = -EINVAL;
- goto err_out;
- }
-
- if (unlikely(pspi->max_speed_hz == 0)) {
- dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
- __func__, pspi->max_speed_hz);
- retval = -EINVAL;
- goto err_out;
- }
-
- dev_dbg(&pspi->dev, "%s Transfer List not empty. "
- "Transfer Speed is set.\n", __func__);
-
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
@@ -531,23 +489,9 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
goto err_return_spinlock;
}
- dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
- " valid\n", __func__);
-
- /* if baud rate has been specified validate the same */
- if (transfer->speed_hz > PCH_MAX_BAUDRATE)
- transfer->speed_hz = PCH_MAX_BAUDRATE;
-
- /* if bits per word has been specified validate the same */
- if (transfer->bits_per_word) {
- if ((transfer->bits_per_word != 8)
- && (transfer->bits_per_word != 16)) {
- retval = -EINVAL;
- dev_err(&pspi->dev,
- "%s Invalid bits per word\n", __func__);
- goto err_return_spinlock;
- }
- }
+ dev_dbg(&pspi->dev,
+ "%s Tx/Rx buffer valid. Transfer length valid\n",
+ __func__);
}
spin_unlock_irqrestore(&data->lock, flags);
@@ -615,7 +559,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
int size;
u32 n_writes;
int j;
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
const u8 *tx_buf;
const u16 *tx_sbuf;
@@ -656,10 +600,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -ENOMEM;
- if (pmsg->complete != 0)
+ if (pmsg->complete)
pmsg->complete(pmsg->context);
/* delete from queue */
@@ -703,13 +647,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
data->current_msg->status = 0;
- if (data->current_msg->complete != 0) {
+ if (data->current_msg->complete) {
dev_dbg(&data->master->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
@@ -740,7 +684,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete)
@@ -936,7 +880,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -951,7 +895,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1172,8 +1116,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->nent = num;
dma->desc_tx = desc_tx;
- dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
- "0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
@@ -1187,7 +1130,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
static void pch_spi_process_messages(struct work_struct *pwork)
{
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
struct pch_spi_data *data;
int bpw;
@@ -1197,12 +1140,12 @@ static void pch_spi_process_messages(struct work_struct *pwork)
spin_lock(&data->lock);
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
- "flushing queue\n", __func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n", __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
- if (pmsg->complete != 0) {
+ if (pmsg->complete) {
spin_unlock(&data->lock);
pmsg->complete(pmsg->context);
spin_lock(&data->lock);
@@ -1401,7 +1344,7 @@ static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
}
-static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
struct spi_master *master;
@@ -1426,22 +1369,23 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
/* baseaddress + address offset) */
data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
PCH_ADDRESS_SIZE * plat_dev->id;
- data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
- PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!data->io_remap_addr) {
dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
+ data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
master->num_chipselect = PCH_MAX_CS;
- master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->max_speed_hz = PCH_MAX_BAUDRATE;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
@@ -1472,6 +1416,11 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
pch_spi_set_master_mode(master);
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
@@ -1479,15 +1428,11 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
goto err_spi_register_master;
}
- if (use_dma) {
- dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
- }
-
return 0;
err_spi_register_master:
- free_irq(board_dat->pdev->irq, board_dat);
+ pch_free_dma_buf(board_dat, data);
+ free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
@@ -1498,7 +1443,7 @@ err_pci_iomap:
return ret;
}
-static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
+static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(plat_dev);
@@ -1619,13 +1564,12 @@ static struct platform_driver pch_spi_pd_driver = {
.owner = THIS_MODULE,
},
.probe = pch_spi_pd_probe,
- .remove = __devexit_p(pch_spi_pd_remove),
+ .remove = pch_spi_pd_remove,
.suspend = pch_spi_pd_suspend,
.resume = pch_spi_pd_resume
};
-static int __devinit pch_spi_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
struct platform_device *pd_dev = NULL;
@@ -1634,14 +1578,11 @@ static int __devinit pch_spi_probe(struct pci_dev *pdev,
struct pch_pd_dev_save *pd_dev_save;
pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
- if (!pd_dev_save) {
- dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
+ if (!pd_dev_save)
return -ENOMEM;
- }
board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
if (!board_dat) {
- dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
retval = -ENOMEM;
goto err_no_mem;
}
@@ -1667,6 +1608,7 @@ static int __devinit pch_spi_probe(struct pci_dev *pdev,
pd_dev = platform_device_alloc("pch-spi", i);
if (!pd_dev) {
dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ retval = -ENOMEM;
goto err_platform_device;
}
pd_dev_save->pd_save[i] = pd_dev;
@@ -1694,6 +1636,8 @@ static int __devinit pch_spi_probe(struct pci_dev *pdev,
return 0;
err_platform_device:
+ while (--i >= 0)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_enable_device:
pci_release_regions(pdev);
@@ -1705,7 +1649,7 @@ err_no_mem:
return retval;
}
-static void __devexit pch_spi_remove(struct pci_dev *pdev)
+static void pch_spi_remove(struct pci_dev *pdev)
{
int i;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
@@ -1776,7 +1720,7 @@ static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
- .remove = __devexit_p(pch_spi_remove),
+ .remove = pch_spi_remove,
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
@@ -1789,8 +1733,10 @@ static int __init pch_spi_init(void)
return ret;
ret = pci_register_driver(&pch_spi_pcidev_driver);
- if (ret)
+ if (ret) {
+ platform_driver_unregister(&pch_spi_pd_driver);
return ret;
+ }
return 0;
}
@@ -1809,3 +1755,5 @@ MODULE_PARM_DESC(use_dma,
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
+MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
+
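Several hunks above switch the queue-flush loops from list_for_each_entry() to list_for_each_entry_safe(); the _safe variant is required because each message is deleted from the queue inside the loop body. A minimal sketch of the pattern, with a hypothetical message structure and list head:

	#include <linux/errno.h>
	#include <linux/list.h>

	struct msg {				/* hypothetical queue entry */
		struct list_head queue;
		int status;
	};

	static void flush_queue(struct list_head *head)
	{
		struct msg *pmsg, *tmp;

		/* 'tmp' caches the next entry, so deleting 'pmsg' inside the
		 * loop does not break the iteration. */
		list_for_each_entry_safe(pmsg, tmp, head, queue) {
			pmsg->status = -EIO;
			list_del(&pmsg->queue);
		}
	}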
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index d5a3cbb646c..5f183baa91a 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -26,7 +26,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#define SPI_FIFO_SIZE 4
@@ -80,7 +80,6 @@ struct txx9spi {
void __iomem *membase;
int baseclk;
struct clk *clk;
- u32 max_speed_hz, min_speed_hz;
int last_chipselect;
int last_chipselect_val;
};
@@ -116,15 +115,8 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
static int txx9spi_setup(struct spi_device *spi)
{
struct txx9spi *c = spi_master_get_devdata(spi->master);
- u8 bits_per_word;
- if (!spi->max_speed_hz
- || spi->max_speed_hz > c->max_speed_hz
- || spi->max_speed_hz < c->min_speed_hz)
- return -EINVAL;
-
- bits_per_word = spi->bits_per_word;
- if (bits_per_word != 8 && bits_per_word != 16)
+ if (!spi->max_speed_hz)
return -EINVAL;
if (gpio_direction_output(spi->chip_select,
@@ -182,16 +174,15 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
| 0x08,
TXx9_SPCR0);
- list_for_each_entry (t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
const void *txbuf = t->tx_buf;
void *rxbuf = t->rx_buf;
u32 data;
unsigned int len = t->len;
unsigned int wsize;
u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+ u8 bits_per_word = t->bits_per_word;
- bits_per_word = bits_per_word ? : 8;
wsize = bits_per_word >> 3; /* in bytes */
if (prev_speed_hz != speed_hz
@@ -271,7 +262,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
exit:
m->status = status;
- m->complete(m->context);
+ if (m->complete)
+ m->complete(m->context);
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
@@ -314,19 +306,9 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
m->actual_length = 0;
/* check each transfer's parameters */
- list_for_each_entry (t, &m->transfers, transfer_list) {
- u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
-
- bits_per_word = bits_per_word ? : 8;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
- if (bits_per_word != 8 && bits_per_word != 16)
- return -EINVAL;
- if (t->len & ((bits_per_word >> 3) - 1))
- return -EINVAL;
- if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
- return -EINVAL;
}
spin_lock_irqsave(&c->lock, flags);
@@ -337,7 +319,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init txx9spi_probe(struct platform_device *dev)
+static int txx9spi_probe(struct platform_device *dev)
{
struct spi_master *master;
struct txx9spi *c;
@@ -357,7 +339,7 @@ static int __init txx9spi_probe(struct platform_device *dev)
INIT_LIST_HEAD(&c->queue);
init_waitqueue_head(&c->waitq);
- c->clk = clk_get(&dev->dev, "spi-baseclk");
+ c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
if (IS_ERR(c->clk)) {
ret = PTR_ERR(c->clk);
c->clk = NULL;
@@ -365,22 +347,16 @@ static int __init txx9spi_probe(struct platform_device *dev)
}
ret = clk_enable(c->clk);
if (ret) {
- clk_put(c->clk);
c->clk = NULL;
goto exit;
}
c->baseclk = clk_get_rate(c->clk);
- c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
- c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
+ master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
+ master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
- "spi_txx9"))
- goto exit_busy;
- c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
- if (!c->membase)
+ c->membase = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(c->membase))
goto exit_busy;
/* enter config mode */
@@ -413,8 +389,9 @@ static int __init txx9spi_probe(struct platform_device *dev)
master->setup = txx9spi_setup;
master->transfer = txx9spi_transfer;
master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&dev->dev, master);
if (ret)
goto exit;
return 0;
@@ -423,26 +400,19 @@ exit_busy:
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
- if (c->clk) {
+ if (c->clk)
clk_disable(c->clk);
- clk_put(c->clk);
- }
- platform_set_drvdata(dev, NULL);
spi_master_put(master);
return ret;
}
-static int __exit txx9spi_remove(struct platform_device *dev)
+static int txx9spi_remove(struct platform_device *dev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
+ struct spi_master *master = platform_get_drvdata(dev);
struct txx9spi *c = spi_master_get_devdata(master);
- spi_unregister_master(master);
- platform_set_drvdata(dev, NULL);
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
- clk_put(c->clk);
- spi_master_put(master);
return 0;
}
@@ -450,7 +420,8 @@ static int __exit txx9spi_remove(struct platform_device *dev)
MODULE_ALIAS("platform:spi_txx9");
static struct platform_driver txx9spi_driver = {
- .remove = __exit_p(txx9spi_remove),
+ .probe = txx9spi_probe,
+ .remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
.owner = THIS_MODULE,
@@ -459,7 +430,7 @@ static struct platform_driver txx9spi_driver = {
static int __init txx9spi_init(void)
{
- return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+ return platform_driver_register(&txx9spi_driver);
}
subsys_initcall(txx9spi_init);
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 266a847e299..bb478dccf1d 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -74,15 +73,13 @@ static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
{
- unsigned int speed;
-
- if ((t->bits_per_word && t->bits_per_word != 8) || t->len > 62)
+ if (t->len > 62)
return -EINVAL;
- speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (t->speed_hz != spi_xcomm->current_speed) {
+ unsigned int divider;
- if (speed != spi_xcomm->current_speed) {
- unsigned int divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, speed);
+ divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
if (divider >= 64)
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
else if (divider >= 16)
@@ -90,7 +87,7 @@ static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
else
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
- spi_xcomm->current_speed = speed;
+ spi_xcomm->current_speed = t->speed_hz;
}
if (spi->mode & SPI_CPOL)
@@ -148,8 +145,6 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
int status = 0;
bool is_last;
- is_first = true;
-
spi_xcomm_chipselect(spi_xcomm, spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
@@ -209,15 +204,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
return status;
}
-static int spi_xcomm_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word != 8)
- return -EINVAL;
-
- return 0;
-}
-
-static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
+static int spi_xcomm_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct spi_xcomm *spi_xcomm;
@@ -233,28 +220,19 @@ static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
master->num_chipselect = 16;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->setup = spi_xcomm_setup;
master->transfer_one_message = spi_xcomm_transfer_one;
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&i2c->dev, master);
if (ret < 0)
spi_master_put(master);
return ret;
}
-static int __devexit spi_xcomm_remove(struct i2c_client *i2c)
-{
- struct spi_master *master = i2c_get_clientdata(i2c);
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
@@ -267,7 +245,6 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
- .remove = __devexit_p(spi_xcomm_remove),
};
module_i2c_driver(spi_xcomm_driver);
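This conversion, like the txx9 and xilinx ones nearby, follows the same core-API pattern: word-size validation moves out of a per-driver setup() hook into master->bits_per_word_mask, and registration becomes device-managed so the remove() path no longer unregisters the master by hand. A condensed, hypothetical probe() showing the shape of the result (driver name and state struct are made up; a real driver would also set its transfer hooks):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct foo_spi { void __iomem *regs; };	/* hypothetical driver state */

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_master *master;
		int ret;

		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
		if (!master)
			return -ENOMEM;

		/* The core rejects unsupported word sizes; no custom setup() needed. */
		master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
		master->mode_bits = SPI_CPOL | SPI_CPHA;

		/* Device-managed registration: the master is unregistered
		 * automatically when the device goes away. */
		ret = devm_spi_register_master(&pdev->dev, master);
		if (ret)
			spi_master_put(master);
		return ret;
	}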
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 4c5a663b9fa..a3b0b9944bf 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -30,6 +29,7 @@
*/
#define XSPI_CR_OFFSET 0x60 /* Control Register */
+#define XSPI_CR_LOOP 0x01
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
#define XSPI_CR_CPOL 0x08
@@ -79,19 +79,18 @@ struct xilinx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
- struct resource mem; /* phys mem */
void __iomem *regs; /* virt. address of the control registers */
- u32 irq;
+ int irq;
u8 *rx_ptr; /* pointer in the Tx buffer */
const u8 *tx_ptr; /* pointer in the Rx buffer */
int remaining_bytes; /* the number of bytes left to transfer */
u8 bits_per_word;
- unsigned int (*read_fn) (void __iomem *);
- void (*write_fn) (u32, void __iomem *);
- void (*tx_fn) (struct xilinx_spi *);
- void (*rx_fn) (struct xilinx_spi *);
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(u32, void __iomem *);
+ void (*tx_fn)(struct xilinx_spi *);
+ void (*rx_fn)(struct xilinx_spi *);
};
static void xspi_write32(u32 val, void __iomem *addr)
@@ -209,41 +208,11 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
- * supports 8 or 16 bits per word which cannot be changed in software.
- * SPI clock can't be changed in software either.
- * Check for correct bits per word. Chip select delay calculations could be
- * added here as soon as bitbang_work() can be made aware of the delay value.
+ * custom txrx_bufs().
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u8 bits_per_word;
-
- bits_per_word = (t && t->bits_per_word)
- ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != xspi->bits_per_word) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int xilinx_spi_setup(struct spi_device *spi)
-{
- /* always return 0, we can not check the number of bits.
- * There are cases when SPI setup is called before any driver is
- * there, in that case the SPI core defaults to 8 bits, which we
- * do not support in some cases. But if we return an error, the
- * SPI device would not be registered and no driver can get hold of it
- * When the driver is there, it will call SPI setup again with the
- * correct number of bits per transfer.
- * If a driver setups with the wrong bit number, it will fail when
- * it tries to do a transfer
- */
return 0;
}
@@ -267,16 +236,14 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u32 ipif_ier;
- u16 cr;
/* We get here with transmitter inhibited */
xspi->tx_ptr = t->tx_buf;
xspi->rx_ptr = t->rx_buf;
xspi->remaining_bytes = t->len;
- INIT_COMPLETION(xspi->done);
+ reinit_completion(&xspi->done);
- xilinx_spi_fill_tx_fifo(xspi);
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
@@ -285,12 +252,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
xspi->regs + XIPIF_V123B_IIER_OFFSET);
- /* Start the transfer by not inhibiting the transmitter any longer */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
- ~XSPI_CR_TRANS_INHIBIT;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ for (;;) {
+ u16 cr;
+ u8 sr;
+
+ xilinx_spi_fill_tx_fifo(xspi);
+
+ /* Start the transfer by not inhibiting the transmitter any
+ * longer
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+ ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+ wait_for_completion(&xspi->done);
- wait_for_completion(&xspi->done);
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+
+ /* Read out all the data from the Rx FIFO */
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->rx_fn(xspi);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ }
+
+ /* See if there is more data to send */
+ if (xspi->remaining_bytes <= 0)
+ break;
+ }
/* Disable the transmit empty interrupt */
xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +310,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- u16 cr;
- u8 sr;
-
- /* A transmit has just completed. Process received data and
- * check for more data to transmit. Always inhibit the
- * transmitter while the Isr refills the transmit register/FIFO,
- * or make sure it is stopped if we're done.
- */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- xspi->rx_fn(xspi);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- }
-
- /* See if there is more data to send */
- if (xspi->remaining_bytes > 0) {
- xilinx_spi_fill_tx_fifo(xspi);
- /* Start the transfer by not inhibiting the
- * transmitter any longer
- */
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- } else {
- /* No more data to send.
- * Indicate the transfer is completed.
- */
- complete(&xspi->done);
- }
+ complete(&xspi->done);
}
return IRQ_HANDLED;
@@ -358,51 +323,75 @@ static const struct of_device_id xilinx_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
- u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
+static int xilinx_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
struct xilinx_spi *xspi;
- int ret;
+ struct xspi_platform_data *pdata;
+ struct resource *res;
+ int ret, num_cs = 0, bits_per_word = 8;
+ struct spi_master *master;
+ u32 tmp;
+ u8 i;
- master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ num_cs = pdata->num_chipselect;
+ bits_per_word = pdata->bits_per_word;
+ } else {
+ of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
+ &num_cs);
+ }
+
+ if (!num_cs) {
+ dev_err(&pdev->dev,
+ "Missing slave select configuration data\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
- return NULL;
+ return -ENODEV;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
xspi = spi_master_get_devdata(master);
- xspi->bitbang.master = spi_master_get(master);
+ xspi->bitbang.master = master;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
- xspi->bitbang.master->setup = xilinx_spi_setup;
init_completion(&xspi->done);
- if (!request_mem_region(mem->start, resource_size(mem),
- XILINX_SPI_NAME))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
goto put_master;
-
- xspi->regs = ioremap(mem->start, resource_size(mem));
- if (xspi->regs == NULL) {
- dev_warn(dev, "ioremap failure\n");
- goto map_failed;
}
- master->bus_num = bus_num;
+ master->bus_num = pdev->dev.id;
master->num_chipselect = num_cs;
- master->dev.of_node = dev->of_node;
+ master->dev.of_node = pdev->dev.of_node;
+
+ /*
+ * Detect endianness on the IP via the loop bit in CR. Detection
+ * must be done before reset is sent, because an incorrect reset
+ * value generates an error interrupt.
+ * Set up the little-endian helper functions first, try to use them,
+ * and check whether the loop bit was actually set.
+ */
+ xspi->read_fn = xspi_read32;
+ xspi->write_fn = xspi_write32;
- xspi->mem = *mem;
- xspi->irq = irq;
- if (little_endian) {
- xspi->read_fn = xspi_read32;
- xspi->write_fn = xspi_write32;
- } else {
+ xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
+ tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ tmp &= XSPI_CR_LOOP;
+ if (tmp != XSPI_CR_LOOP) {
xspi->read_fn = xspi_read32_be;
xspi->write_fn = xspi_write32_be;
}
+
+ master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bits_per_word = bits_per_word;
if (xspi->bits_per_word == 8) {
xspi->tx_fn = xspi_tx8;
@@ -413,115 +402,63 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
} else if (xspi->bits_per_word == 32) {
xspi->tx_fn = xspi_tx32;
xspi->rx_fn = xspi_rx32;
- } else
- goto unmap_io;
-
+ } else {
+ ret = -EINVAL;
+ goto put_master;
+ }
/* SPI controller initializations */
xspi_init_hw(xspi);
+ xspi->irq = platform_get_irq(pdev, 0);
+ if (xspi->irq < 0) {
+ ret = xspi->irq;
+ goto put_master;
+ }
+
/* Register for SPI Interrupt */
- ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+ dev_name(&pdev->dev), xspi);
if (ret)
- goto unmap_io;
+ goto put_master;
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
- dev_err(dev, "spi_bitbang_start FAILED\n");
- goto free_irq;
- }
-
- dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)mem->start, xspi->regs, xspi->irq);
- return master;
-
-free_irq:
- free_irq(xspi->irq, xspi);
-unmap_io:
- iounmap(xspi->regs);
-map_failed:
- release_mem_region(mem->start, resource_size(mem));
-put_master:
- spi_master_put(master);
- return NULL;
-}
-EXPORT_SYMBOL(xilinx_spi_init);
-
-void xilinx_spi_deinit(struct spi_master *master)
-{
- struct xilinx_spi *xspi;
-
- xspi = spi_master_get_devdata(master);
-
- spi_bitbang_stop(&xspi->bitbang);
- free_irq(xspi->irq, xspi);
- iounmap(xspi->regs);
-
- release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
- spi_master_put(xspi->bitbang.master);
-}
-EXPORT_SYMBOL(xilinx_spi_deinit);
-
-static int __devinit xilinx_spi_probe(struct platform_device *dev)
-{
- struct xspi_platform_data *pdata;
- struct resource *r;
- int irq, num_cs = 0, little_endian = 0, bits_per_word = 8;
- struct spi_master *master;
- u8 i;
-
- pdata = dev->dev.platform_data;
- if (pdata) {
- num_cs = pdata->num_chipselect;
- little_endian = pdata->little_endian;
- bits_per_word = pdata->bits_per_word;
- }
-
-#ifdef CONFIG_OF
- if (dev->dev.of_node) {
- const __be32 *prop;
- int len;
-
- /* number of slave select bits is required */
- prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits",
- &len);
- if (prop && len >= sizeof(*prop))
- num_cs = __be32_to_cpup(prop);
- }
-#endif
-
- if (!num_cs) {
- dev_err(&dev->dev, "Missing slave select configuration data\n");
- return -EINVAL;
+ dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
+ goto put_master;
}
-
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0)
- return -ENXIO;
-
- master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs,
- little_endian, bits_per_word);
- if (!master)
- return -ENODEV;
+ dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
+ (unsigned long long)res->start, xspi->regs, xspi->irq);
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
spi_new_device(master, pdata->devices + i);
}
- platform_set_drvdata(dev, master);
+ platform_set_drvdata(pdev, master);
return 0;
+
+put_master:
+ spi_master_put(master);
+
+ return ret;
}
-static int __devexit xilinx_spi_remove(struct platform_device *dev)
+static int xilinx_spi_remove(struct platform_device *pdev)
{
- xilinx_spi_deinit(platform_get_drvdata(dev));
- platform_set_drvdata(dev, 0);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ void __iomem *regs_base = xspi->regs;
+
+ spi_bitbang_stop(&xspi->bitbang);
+
+ /* Disable all the interrupts just in case */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+
+ spi_master_put(xspi->bitbang.master);
return 0;
}
@@ -531,7 +468,7 @@ MODULE_ALIAS("platform:" XILINX_SPI_NAME);
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
- .remove = __devexit_p(xilinx_spi_remove),
+ .remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
new file mode 100644
index 00000000000..41e158187f9
--- /dev/null
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -0,0 +1,170 @@
+/*
+ * Xtensa xtfpga SPI controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XTFPGA_SPI_NAME "xtfpga_spi"
+
+#define XTFPGA_SPI_START 0x0
+#define XTFPGA_SPI_BUSY 0x4
+#define XTFPGA_SPI_DATA 0x8
+
+#define BUSY_WAIT_US 100
+
+struct xtfpga_spi {
+ struct spi_bitbang bitbang;
+ void __iomem *regs;
+ u32 data;
+ unsigned data_sz;
+};
+
+static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+{
+ iowrite32(val, spi->regs + addr);
+}
+
+static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+{
+ return ioread32(spi->regs + addr);
+}
+
+static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+{
+ unsigned i;
+ for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
+ i < BUSY_WAIT_US; ++i)
+ udelay(1);
+ WARN_ON_ONCE(i == BUSY_WAIT_US);
+}
+
+static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 v, u8 bits)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
+ xspi->data_sz += bits;
+ if (xspi->data_sz >= 16) {
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
+ xspi->data >> (xspi->data_sz - 16));
+ xspi->data_sz -= 16;
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
+ xtfpga_spi_wait_busy(xspi);
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ }
+
+ return 0;
+}
+
+static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ WARN_ON(xspi->data_sz != 0);
+ xspi->data_sz = 0;
+}
+
+static int xtfpga_spi_probe(struct platform_device *pdev)
+{
+ struct xtfpga_spi *xspi;
+ struct resource *mem;
+ int ret;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_NO_RX;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->bus_num = pdev->dev.id;
+ master->dev.of_node = pdev->dev.of_node;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xtfpga_spi_chipselect;
+ xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto err;
+ }
+
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ usleep_range(1000, 2000);
+ if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
+ dev_err(&pdev->dev, "Device stuck in busy state\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int xtfpga_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ spi_master_put(master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_spi_of_match[] = {
+ { .compatible = "cdns,xtfpga-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
+#endif
+
+static struct platform_driver xtfpga_spi_driver = {
+ .probe = xtfpga_spi_probe,
+ .remove = xtfpga_spi_remove,
+ .driver = {
+ .name = XTFPGA_SPI_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xtfpga_spi_of_match),
+ },
+};
+module_platform_driver(xtfpga_spi_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
+MODULE_LICENSE("GPL");
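The new controller registers as a TX-only bitbang master (SPI_MASTER_NO_RX, 1..16-bit words, and only SPI_MODE_0 wired up in txrx_word[]); slaves attach to it like on any other SPI bus. A hypothetical non-DT board registration is sketched below; the modalias, speed and numbering are made up, and board info would normally be registered from machine init code:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/spi/spi.h>

	static struct spi_board_info xtfpga_spi_devices[] __initdata = {
		{
			.modalias	= "example-dac",	/* hypothetical slave */
			.max_speed_hz	= 1000000,
			.bus_num	= 0,	/* matches pdev->dev.id of the controller */
			.chip_select	= 0,
			.mode		= SPI_MODE_0,
		},
	};

	static int __init xtfpga_board_spi_init(void)
	{
		/* Adds the entries to the core's board-info list; they are bound
		 * to the controller when its bus number matches. */
		return spi_register_board_info(xtfpga_spi_devices,
					       ARRAY_SIZE(xtfpga_spi_devices));
	}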
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 84c2861d6f4..d4f9670b51b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,17 +24,25 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
@@ -52,14 +60,21 @@ static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute spi_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *spi_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(spi_dev);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
@@ -93,6 +108,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then try ACPI */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
if (sdrv->id_table)
return !!spi_match_id(sdrv->id_table, spi);
@@ -102,6 +121,11 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
return 0;
@@ -216,13 +240,13 @@ static const struct dev_pm_ops spi_pm = {
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
- pm_generic_runtime_idle
+ NULL
)
};
struct bus_type spi_bus_type = {
.name = "spi",
- .dev_attrs = spi_dev_attrs,
+ .dev_groups = spi_dev_groups,
.match = spi_match_device,
.uevent = spi_uevent,
.pm = &spi_pm,
@@ -233,15 +257,25 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ int ret;
+
+ acpi_dev_pm_attach(dev, true);
+ ret = sdrv->probe(to_spi_device(dev));
+ if (ret)
+ acpi_dev_pm_detach(dev, true);
- return sdrv->probe(to_spi_device(dev));
+ return ret;
}
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ int ret;
+
+ ret = sdrv->remove(to_spi_device(dev));
+ acpi_dev_pm_detach(dev, true);
- return sdrv->remove(to_spi_device(dev));
+ return ret;
}
static void spi_drv_shutdown(struct device *dev)
@@ -316,7 +350,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
if (!spi_master_get(master))
return NULL;
- spi = kzalloc(sizeof *spi, GFP_KERNEL);
+ spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
dev_err(dev, "cannot alloc spi_device\n");
spi_master_put(master);
@@ -327,11 +361,36 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
+ spi->cs_gpio = -ENOENT;
device_initialize(&spi->dev);
return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
+static void spi_dev_set_name(struct spi_device *spi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+ if (adev) {
+ dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ spi->chip_select);
+}
+
+static int spi_dev_check(struct device *dev, void *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *new_spi = data;
+
+ if (spi->master == new_spi->master &&
+ spi->chip_select == new_spi->chip_select)
+ return -EBUSY;
+ return 0;
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -344,22 +403,20 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
int spi_add_device(struct spi_device *spi)
{
static DEFINE_MUTEX(spi_add_lock);
- struct device *dev = spi->master->dev.parent;
- struct device *d;
+ struct spi_master *master = spi->master;
+ struct device *dev = master->dev.parent;
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= spi->master->num_chipselect) {
+ if (spi->chip_select >= master->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n",
spi->chip_select,
- spi->master->num_chipselect);
+ master->num_chipselect);
return -EINVAL;
}
/* Set the bus ID string */
- dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
- spi->chip_select);
-
+ spi_dev_set_name(spi);
/* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
@@ -367,15 +424,16 @@ int spi_add_device(struct spi_device *spi)
*/
mutex_lock(&spi_add_lock);
- d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
- if (d != NULL) {
+ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
- put_device(d);
- status = -EBUSY;
goto done;
}
+ if (master->cs_gpios)
+ spi->cs_gpio = master->cs_gpios[spi->chip_select];
+
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
@@ -486,8 +544,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
*/
-int __devinit
-spi_register_board_info(struct spi_board_info const *info, unsigned n)
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
int i;
@@ -512,6 +569,291 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
+static void spi_set_cs(struct spi_device *spi, bool enable)
+{
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !enable);
+ else if (spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+}
+
+#ifdef CONFIG_HAS_DMA
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_buf = page_address(vm_page) +
+ ((size_t)buf & ~PAGE_MASK);
+ } else {
+ sg_buf = buf;
+ }
+
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ return __spi_map_msg(master, msg);
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which implement a transfer_one() operation. It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ bool keep_cs = false;
+ int ret = 0;
+ int ms = 1;
+
+ spi_set_cs(msg->spi, true);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
+ reinit_completion(&master->xfer_completion);
+
+ ret = master->transfer_one(master, msg->spi, xfer);
+ if (ret < 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0) {
+ ret = 0;
+ ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms += ms + 100; /* some tolerance */
+
+ ms = wait_for_completion_timeout(&master->xfer_completion,
+ msecs_to_jiffies(ms));
+ }
+
+ if (ms == 0) {
+ dev_err(&msg->spi->dev, "SPI transfer timed out\n");
+ msg->status = -ETIMEDOUT;
+ }
+
+ trace_spi_transfer_stop(msg, xfer);
+
+ if (msg->status != -EINPROGRESS)
+ goto out;
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ spi_set_cs(msg->spi, false);
+ udelay(10);
+ spi_set_cs(msg->spi, true);
+ }
+ }
+
+ msg->actual_length += xfer->len;
+ }
+
+out:
+ if (ret != 0 || !keep_cs)
+ spi_set_cs(msg->spi, false);
+
+ if (msg->status == -EINPROGRESS)
+ msg->status = ret;
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_master *master)
+{
+ complete(&master->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
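/*
 * A minimal sketch of the driver half of the contract above, assuming a
 * hypothetical interrupt-driven foo controller: transfer_one() starts the
 * hardware and returns a positive value, and the completion IRQ signals
 * the core through spi_finalize_current_transfer().
 */
#include <linux/interrupt.h>
#include <linux/spi/spi.h>

static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* program FIFOs/DMA from xfer->tx_buf/rx_buf, len and speed_hz ... */

	return 1;	/* >0: the core waits on master->xfer_completion */
}

static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* lets spi_transfer_one_message() move on to CS/delay handling */
	spi_finalize_current_transfer(master);

	return IRQ_HANDLED;
}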
+
/**
* spi_pump_messages - kthread work function which processes spi message queue
* @work: pointer to kthread work struct contained in the master struct
@@ -532,17 +874,25 @@ static void spi_pump_messages(struct kthread_work *work)
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
- if (master->busy && master->unprepare_transfer_hardware) {
- ret = master->unprepare_transfer_hardware(master);
- if (ret) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
- dev_err(&master->dev,
- "failed to unprepare transfer hardware\n");
- return;
- }
+ if (!master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
+ if (master->unprepare_transfer_hardware &&
+ master->unprepare_transfer_hardware(master))
+ dev_err(&master->dev,
+ "failed to unprepare transfer hardware\n");
+ if (master->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(master->dev.parent);
+ pm_runtime_put_autosuspend(master->dev.parent);
+ }
+ trace_spi_master_idle(master);
return;
}
@@ -553,7 +903,7 @@ static void spi_pump_messages(struct kthread_work *work)
}
/* Extract head of queue */
master->cur_msg =
- list_entry(master->queue.next, struct spi_message, queue);
+ list_first_entry(&master->queue, struct spi_message, queue);
list_del_init(&master->cur_msg->queue);
if (master->busy)
@@ -562,15 +912,51 @@ static void spi_pump_messages(struct kthread_work *work)
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (!was_busy && master->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(master->dev.parent);
+ if (ret < 0) {
+ dev_err(&master->dev, "Failed to power device: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (!was_busy)
+ trace_spi_master_busy(master);
+
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,
"failed to prepare transfer hardware\n");
+
+ if (master->auto_runtime_pm)
+ pm_runtime_put(master->dev.parent);
return;
}
}
+ trace_spi_message_start(master->cur_msg);
+
+ if (master->prepare_message) {
+ ret = master->prepare_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare message: %d\n", ret);
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+ master->cur_msg_prepared = true;
+ }
+
+ ret = spi_map_msg(master, master->cur_msg);
+ if (ret) {
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -591,7 +977,7 @@ static int spi_init_queue(struct spi_master *master)
init_kthread_worker(&master->kworker);
master->kworker_task = kthread_run(kthread_worker_fn,
- &master->kworker,
+ &master->kworker, "%s",
dev_name(&master->dev));
if (IS_ERR(master->kworker_task)) {
dev_err(&master->dev, "failed to create message pump task\n");
@@ -630,11 +1016,8 @@ struct spi_message *spi_get_next_queued_message(struct spi_master *master)
/* get a pointer to the next message, if any */
spin_lock_irqsave(&master->queue_lock, flags);
- if (list_empty(&master->queue))
- next = NULL;
- else
- next = list_entry(master->queue.next,
- struct spi_message, queue);
+ next = list_first_entry_or_null(&master->queue, struct spi_message,
+ queue);
spin_unlock_irqrestore(&master->queue_lock, flags);
return next;
@@ -652,6 +1035,7 @@ void spi_finalize_current_message(struct spi_master *master)
{
struct spi_message *mesg;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&master->queue_lock, flags);
mesg = master->cur_msg;
@@ -660,9 +1044,22 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ spi_unmap_msg(master, mesg);
+
+ if (master->cur_msg_prepared && master->unprepare_message) {
+ ret = master->unprepare_message(master, mesg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to unprepare message: %d\n", ret);
+ }
+ }
+ master->cur_msg_prepared = false;
+
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
+
+ trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
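/*
 * A minimal sketch of the optional per-message hooks driven above,
 * assuming a hypothetical foo controller: prepare_message() runs once
 * before transfer_one_message(), and unprepare_message() is called from
 * spi_finalize_current_message() once cur_msg_prepared is set.
 */
#include <linux/spi/spi.h>

static int foo_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	/* e.g. latch clock/mode configuration for msg->spi once per message */
	return 0;
}

static int foo_spi_unprepare_message(struct spi_master *master,
				     struct spi_message *msg)
{
	/* undo whatever prepare_message() configured */
	return 0;
}

static void foo_spi_register_hooks(struct spi_master *master)
{
	master->prepare_message = foo_spi_prepare_message;
	master->unprepare_message = foo_spi_unprepare_message;
}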
@@ -702,7 +1099,7 @@ static int spi_stop_queue(struct spi_master *master)
*/
while ((!list_empty(&master->queue) || master->busy) && limit--) {
spin_unlock_irqrestore(&master->queue_lock, flags);
- msleep(10);
+ usleep_range(10000, 11000);
spin_lock_irqsave(&master->queue_lock, flags);
}
@@ -764,7 +1161,7 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &master->queue);
- if (master->running && !master->busy)
+ if (!master->busy)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -775,8 +1172,9 @@ static int spi_master_initialize_queue(struct spi_master *master)
{
int ret;
- master->queued = true;
master->transfer = spi_queued_transfer;
+ if (!master->transfer_one_message)
+ master->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(master);
@@ -784,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
dev_err(&master->dev, "problem initializing queue\n");
goto err_init_queue;
}
+ master->queued = true;
ret = spi_start_queue(master);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
@@ -793,14 +1192,14 @@ static int spi_master_initialize_queue(struct spi_master *master)
return 0;
err_start_queue:
-err_init_queue:
spi_destroy_queue(master);
+err_init_queue:
return ret;
}
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
+#if defined(CONFIG_OF)
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
@@ -812,14 +1211,13 @@ static void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
- const __be32 *prop;
int rc;
- int len;
+ u32 value;
if (!master->dev.of_node)
return;
- for_each_child_of_node(master->dev.of_node, nc) {
+ for_each_available_child_of_node(master->dev.of_node, nc) {
/* Alloc an spi_device */
spi = spi_alloc_device(master);
if (!spi) {
@@ -839,14 +1237,14 @@ static void of_register_spi_devices(struct spi_master *master)
}
/* Device address */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'reg' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->chip_select = be32_to_cpup(prop);
+ spi->chip_select = value;
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
@@ -855,16 +1253,57 @@ static void of_register_spi_devices(struct spi_master *master)
spi->mode |= SPI_CPOL;
if (of_find_property(nc, "spi-cs-high", NULL))
spi->mode |= SPI_CS_HIGH;
+ if (of_find_property(nc, "spi-3wire", NULL))
+ spi->mode |= SPI_3WIRE;
+ if (of_find_property(nc, "spi-lsb-first", NULL))
+ spi->mode |= SPI_LSB_FIRST;
+
+ /* Device DUAL/QUAD mode */
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-tx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-rx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
/* Device speed */
- prop = of_get_property(nc, "spi-max-frequency", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
- nc->full_name);
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ nc->full_name, rc);
spi_dev_put(spi);
continue;
}
- spi->max_speed_hz = be32_to_cpup(prop);
+ spi->max_speed_hz = value;
/* IRQ */
spi->irq = irq_of_parse_and_map(nc, 0);
@@ -874,7 +1313,7 @@ static void of_register_spi_devices(struct spi_master *master)
spi->dev.of_node = nc;
/* Register the new device */
- request_module(spi->modalias);
+ request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -888,6 +1327,102 @@ static void of_register_spi_devices(struct spi_master *master)
static void of_register_spi_devices(struct spi_master *master) { }
#endif
+#ifdef CONFIG_ACPI
+static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct spi_device *spi = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_spi_serialbus *sb;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+ spi->chip_select = sb->device_selection;
+ spi->max_speed_hz = sb->connection_speed;
+
+ if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
+ spi->mode |= SPI_CPHA;
+ if (sb->clock_polarity == ACPI_SPI_START_HIGH)
+ spi->mode |= SPI_CPOL;
+ if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
+ spi->mode |= SPI_CS_HIGH;
+ }
+ } else if (spi->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ spi->irq = r.start;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct spi_master *master = data;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct spi_device *spi;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_COMPANION_SET(&spi->dev, adev);
+ spi->irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, spi);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !spi->max_speed_hz) {
+ spi_dev_put(spi);
+ return AE_OK;
+ }
+
+ adev->power.flags.ignore_parent = true;
+ strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
+ if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
+ dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ spi_dev_put(spi);
+ }
+
+ return AE_OK;
+}
+
+static void acpi_register_spi_devices(struct spi_master *master)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(master->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_spi_add_device, NULL,
+ master, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
+}
+#else
+static inline void acpi_register_spi_devices(struct spi_master *master) {}
+#endif /* CONFIG_ACPI */
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -931,7 +1466,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
if (!dev)
return NULL;
- master = kzalloc(size + sizeof *master, GFP_KERNEL);
+ master = kzalloc(size + sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
@@ -946,6 +1481,47 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
+#ifdef CONFIG_OF
+static int of_spi_register_master(struct spi_master *master)
+{
+ int nb, i, *cs;
+ struct device_node *np = master->dev.of_node;
+
+ if (!np)
+ return 0;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ master->num_chipselect = max_t(int, nb, master->num_chipselect);
+
+ /* Return error only for an incorrectly formed cs-gpios property */
+ if (nb == 0 || nb == -ENOENT)
+ return 0;
+ else if (nb < 0)
+ return nb;
+
+ cs = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect,
+ GFP_KERNEL);
+ master->cs_gpios = cs;
+
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ cs[i] = -ENOENT;
+
+ for (i = 0; i < nb; i++)
+ cs[i] = of_get_named_gpio(np, "cs-gpios", i);
+
+ return 0;
+}
+#else
+static int of_spi_register_master(struct spi_master *master)
+{
+ return 0;
+}
+#endif
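/*
 * A minimal sketch of a consumer of the cs-gpios parsing above, assuming
 * a hypothetical foo controller: entries left at -ENOENT fall back to the
 * native chip select, while valid entries end up in spi->cs_gpio when the
 * child devices are added.
 */
#include <linux/gpio.h>
#include <linux/spi/spi.h>

static int foo_spi_setup(struct spi_device *spi)
{
	int ret;

	if (!gpio_is_valid(spi->cs_gpio))
		return 0;	/* native chip select, nothing to do */

	ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
	if (ret)
		return ret;

	/* park the line deasserted; active-low unless SPI_CS_HIGH is set */
	return gpio_direction_output(spi->cs_gpio,
				     !(spi->mode & SPI_CS_HIGH));
}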
+
/**
* spi_register_master - register SPI master controller
* @master: initialized master, originally from spi_alloc_master()
@@ -977,12 +1553,19 @@ int spi_register_master(struct spi_master *master)
if (!dev)
return -ENODEV;
+ status = of_spi_register_master(master);
+ if (status)
+ return status;
+
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
if (master->num_chipselect == 0)
return -EINVAL;
+ if ((master->bus_num < 0) && master->dev.of_node)
+ master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
+
/* convention: dynamically assigned bus IDs count down from the max */
if (master->bus_num < 0) {
/* FIXME switch to an IDR based scheme, something like
@@ -995,6 +1578,9 @@ int spi_register_master(struct spi_master *master)
spin_lock_init(&master->bus_lock_spinlock);
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
+ init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1012,7 +1598,7 @@ int spi_register_master(struct spi_master *master)
else {
status = spi_master_initialize_queue(master);
if (status) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
goto done;
}
}
@@ -1023,13 +1609,49 @@ int spi_register_master(struct spi_master *master)
spi_match_master_to_boardinfo(master, &bi->board_info);
mutex_unlock(&board_lock);
- /* Register devices from the device tree */
+ /* Register devices from the device tree and ACPI */
of_register_spi_devices(master);
+ acpi_register_spi_devices(master);
done:
return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+ spi_unregister_master(*(struct spi_master **)res);
+}
+
+/**
+ * devm_spi_register_master - register managed SPI master controller
+ * @dev: device managing SPI master
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * Register an SPI master as with spi_register_master(); the master is
+ * automatically unregistered when @dev is unbound.
+ */
+int devm_spi_register_master(struct device *dev, struct spi_master *master)
+{
+ struct spi_master **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ *ptr = master;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_master);
+
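/*
 * A minimal sketch of the intended use, assuming a hypothetical foo
 * platform driver: with devm_spi_register_master() there is no explicit
 * spi_unregister_master() call left in the remove path.
 */
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi {
	void __iomem *regs;	/* illustrative private state */
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	/* ... set num_chipselect, mode_bits, transfer_one, etc. here ... */

	/* unregistered automatically when &pdev->dev is unbound */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);

	return ret;
}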
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -1095,10 +1717,10 @@ int spi_master_resume(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_master_resume);
-static int __spi_master_match(struct device *dev, void *data)
+static int __spi_master_match(struct device *dev, const void *data)
{
struct spi_master *m;
- u16 *bus_num = data;
+ const u16 *bus_num = data;
m = container_of(dev, struct spi_master, dev);
return m->bus_num == *bus_num;
@@ -1155,13 +1777,35 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
*/
int spi_setup(struct spi_device *spi)
{
- unsigned bad_bits;
- int status;
+ unsigned bad_bits, ugly_bits;
+ int status = 0;
+	/* check mode to prevent DUAL and QUAD from being set at the
+	 * same time
+	 */
+ if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
+ ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+ dev_err(&spi->dev,
+ "setup: can not select dual and quad at the same time\n");
+ return -EINVAL;
+ }
+	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
+	 */
+ if ((spi->mode & SPI_3WIRE) && (spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current master
*/
bad_bits = spi->mode & ~spi->master->mode_bits;
+ ugly_bits = bad_bits &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+ if (ugly_bits) {
+ dev_warn(&spi->dev,
+ "setup: ignoring unsupported mode bits %x\n",
+ ugly_bits);
+ spi->mode &= ~ugly_bits;
+ bad_bits &= ~ugly_bits;
+ }
if (bad_bits) {
dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
bad_bits);
@@ -1171,10 +1815,13 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- status = spi->master->setup(spi);
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = spi->master->max_speed_hz;
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
- "%u bits/w, %u Hz max --> %d\n",
+ if (spi->master->setup)
+ status = spi->master->setup(spi);
+
+ dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
@@ -1187,9 +1834,14 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
-static int __spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
+ struct spi_transfer *xfer;
+ int w_size;
+
+ if (list_empty(&message->transfers))
+ return -EINVAL;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1198,7 +1850,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
*/
if ((master->flags & SPI_MASTER_HALF_DUPLEX)
|| (spi->mode & SPI_3WIRE)) {
- struct spi_transfer *xfer;
unsigned flags = master->flags;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -1211,8 +1862,100 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
}
- message->spi = spi;
+	/*
+	 * Set transfer bits_per_word and max speed to the spi device
+	 * defaults when they are not set for this transfer, and default
+	 * transfer tx_nbits and rx_nbits to single-wire transfers
+	 * (SPI_NBITS_SINGLE).
+	 */
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ message->frame_length += xfer->len;
+ if (!xfer->bits_per_word)
+ xfer->bits_per_word = spi->bits_per_word;
+
+ if (!xfer->speed_hz)
+ xfer->speed_hz = spi->max_speed_hz;
+
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
+
+ if (master->bits_per_word_mask) {
+ /* Only 32 bits fit in the mask */
+ if (xfer->bits_per_word > 32)
+ return -EINVAL;
+ if (!(master->bits_per_word_mask &
+ BIT(xfer->bits_per_word - 1)))
+ return -EINVAL;
+ }
+
+		/*
+		 * The SPI transfer length must be a multiple of the in-memory
+		 * word size, i.e. bits_per_word rounded up to a power-of-two
+		 * number of bytes.
+		 */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
+ if (xfer->speed_hz && master->min_speed_hz &&
+ xfer->speed_hz < master->min_speed_hz)
+ return -EINVAL;
+
+ if (xfer->tx_buf && !xfer->tx_nbits)
+ xfer->tx_nbits = SPI_NBITS_SINGLE;
+ if (xfer->rx_buf && !xfer->rx_nbits)
+ xfer->rx_nbits = SPI_NBITS_SINGLE;
+ /* check transfer tx/rx_nbits:
+ * 1. check the value matches one of single, dual and quad
+ * 2. check tx/rx_nbits match the mode in spi_device
+ */
+ if (xfer->tx_buf) {
+ if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ xfer->tx_nbits != SPI_NBITS_DUAL &&
+ xfer->tx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_TX_QUAD))
+ return -EINVAL;
+ }
+ /* check transfer rx_nbits */
+ if (xfer->rx_buf) {
+ if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ xfer->rx_nbits != SPI_NBITS_DUAL &&
+ xfer->rx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD))
+ return -EINVAL;
+ }
+ }
+
message->status = -EINPROGRESS;
+
+ return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
return master->transfer(spi, message);
}
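/*
 * A minimal sketch of what the validation above lets callers rely on:
 * per-transfer fields left at zero inherit the spi_device defaults, so
 * only transfers that differ need explicit settings.  The command byte,
 * lengths and use of dual-wire RX are illustrative.
 */
#include <linux/spi/spi.h>

static int foo_read_id(struct spi_device *spi, const u8 *cmd,
		       u8 *id, size_t id_len)
{
	struct spi_transfer xfers[2] = {
		{
			.tx_buf = cmd,
			.len = 1,	/* bits_per_word/speed_hz: device defaults */
		}, {
			.rx_buf = id,
			.len = id_len,
			.rx_nbits = SPI_NBITS_DUAL, /* requires SPI_RX_DUAL in spi->mode */
		},
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	return spi_sync(spi, &msg);
}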
@@ -1251,6 +1994,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
if (master->bus_lock_flag)
@@ -1299,6 +2046,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
ret = __spi_async(spi, message);
@@ -1453,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
+#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
@@ -1487,15 +2238,22 @@ int spi_write_then_read(struct spi_device *spi,
struct spi_transfer x[2];
u8 *local_buf;
- /* Use preallocated DMA-safe buffer. We can't avoid copying here,
- * (as a pure convenience thing), but we can keep heap costs
- * out of the hot path ...
+	/* Use the preallocated DMA-safe buffer if we can.  We can't avoid
+	 * copying here (it's a pure convenience thing), but we can keep
+	 * heap costs out of the hot path unless someone else is using
+	 * the pre-allocated buffer or the transfer is too large.
+	 */
- if ((n_tx + n_rx) > SPI_BUFSIZ)
- return -EINVAL;
+ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+ local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
+ GFP_KERNEL | GFP_DMA);
+ if (!local_buf)
+ return -ENOMEM;
+ } else {
+ local_buf = buf;
+ }
spi_message_init(&message);
- memset(x, 0, sizeof x);
+ memset(x, 0, sizeof(x));
if (n_tx) {
x[0].len = n_tx;
spi_message_add_tail(&x[0], &message);
@@ -1505,14 +2263,6 @@ int spi_write_then_read(struct spi_device *spi,
spi_message_add_tail(&x[1], &message);
}
- /* ... unless someone else is using the pre-allocated buffer */
- if (!mutex_trylock(&lock)) {
- local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
- if (!local_buf)
- return -ENOMEM;
- } else
- local_buf = buf;
-
memcpy(local_buf, txbuf, n_tx);
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
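/*
 * A minimal sketch of a typical spi_write_then_read() caller; the
 * one-byte command and two-byte, big-endian reply are illustrative and
 * small enough to stay on the preallocated-buffer fast path above.
 */
#include <linux/spi/spi.h>

static int foo_read_reg(struct spi_device *spi, u8 reg, u16 *val)
{
	u8 rx[2];
	int ret;

	ret = spi_write_then_read(spi, &reg, 1, rx, sizeof(rx));
	if (ret)
		return ret;

	*val = (rx[0] << 8) | rx[1];
	return 0;
}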
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 830adbed1d7..e3bc23bb588 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -31,11 +31,13 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
@@ -71,7 +73,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
- | SPI_NO_CS | SPI_READY)
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
struct spidev_data {
dev_t devt;
@@ -204,9 +207,9 @@ spidev_write(struct file *filp, const char __user *buf,
mutex_lock(&spidev->buf_lock);
missing = copy_from_user(spidev->buffer, buf, count);
- if (missing == 0) {
+ if (missing == 0)
status = spidev_sync_write(spidev, count);
- } else
+ else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
@@ -263,6 +266,8 @@ static int spidev_message(struct spidev_data *spidev,
buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
@@ -357,6 +362,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = __put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
case SPI_IOC_RD_LSB_FIRST:
retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
@@ -370,9 +379,13 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* write requests */
case SPI_IOC_WR_MODE:
- retval = __get_user(tmp, (u8 __user *)arg);
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
@@ -380,18 +393,18 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
tmp |= spi->mode & ~SPI_MODE_MASK;
- spi->mode = (u8)tmp;
+ spi->mode = (u16)tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
- dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
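/*
 * A minimal userspace sketch of the new 32-bit mode ioctls, assuming the
 * dual/quad mode bits exported through <linux/spi/spidev.h>; the device
 * path and the choice of SPI_TX_QUAD are illustrative and only work if
 * the underlying controller advertises quad support.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int foo_enable_quad_tx(const char *dev_path)
{
	uint32_t mode;
	int fd, ret;

	fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SPI_IOC_RD_MODE32, &mode);
	if (ret == 0) {
		mode |= SPI_TX_QUAD;
		ret = ioctl(fd, SPI_IOC_WR_MODE32, &mode);
	}

	close(fd);
	return ret;
}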
@@ -571,7 +584,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
-static int __devinit spidev_probe(struct spi_device *spi)
+static int spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
@@ -601,7 +614,7 @@ static int __devinit spidev_probe(struct spi_device *spi)
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
- status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
+ status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
@@ -620,14 +633,13 @@ static int __devinit spidev_probe(struct spi_device *spi)
return status;
}
-static int __devexit spidev_remove(struct spi_device *spi)
+static int spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
- spi_set_drvdata(spi, NULL);
spin_unlock_irq(&spidev->spi_lock);
/* prevent new opens */
@@ -642,13 +654,21 @@ static int __devexit spidev_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spidev_dt_ids),
},
.probe = spidev_probe,
- .remove = __devexit_p(spidev_remove),
+ .remove = spidev_remove,
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from