Diffstat (limited to 'drivers/ata')
113 files changed, 64035 insertions, 20441 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 3f4aa0c99ee..2664da32d9d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -2,22 +2,85 @@
 # SATA/PATA driver configuration
 #
-menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
+config HAVE_PATA_PLATFORM
+	bool
+	help
+	  This is an internal configuration node for any machine that
+	  uses the pata-platform driver to enable the relevant driver in the
+	  configuration structure without having to submit endless patches
+	  to update the PATA_PLATFORM entry.
 
-config ATA
-	tristate "ATA device support"
-	depends on !(M32R || M68K) || BROKEN
-	depends on !SUN4 || BROKEN
+menuconfig ATA
+	tristate "Serial ATA and Parallel ATA drivers (libata)"
+	depends on HAS_IOMEM
+	depends on BLOCK
+	depends on !(M32R || M68K || S390) || BROKEN
 	select SCSI
 	---help---
-	  If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
+	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
 	  any other ATA device under Linux, say Y and make sure that you know
 	  the name of your ATA host adapter (the card inside your computer
 	  that "speaks" the ATA protocol, also called ATA controller),
 	  because you will be asked for it.
 
+	  NOTE: ATA enables basic SCSI support; *however*,
+	  'SCSI disk support', 'SCSI tape support', or
+	  'SCSI CDROM support' may also be needed,
+	  depending on your hardware configuration.
+
 if ATA
 
+config ATA_NONSTANDARD
+	bool
+	default n
+
+config ATA_VERBOSE_ERROR
+	bool "Verbose ATA error reporting"
+	default y
+	help
+	  This option adds parsing of ATA command descriptions and error bits
+	  in libata kernel output, making it easier to interpret.
+	  This option will enlarge the kernel by approx. 6KB. Disable it only
+	  if kernel size is more important than ease of debugging.
+
+	  If unsure, say Y.
+
+config ATA_ACPI
+	bool "ATA ACPI Support"
+	depends on ACPI && PCI
+	default y
+	help
+	  This option adds support for ATA-related ACPI objects.
+	  These ACPI objects add the ability to retrieve taskfiles
+	  from the ACPI BIOS and write them to the disk controller.
+	  These objects may be related to performance, security,
+	  power management, or other areas.
+	  You can disable this at kernel boot time by using the
+	  option libata.noacpi=1.
+
+config SATA_ZPODD
+	bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
+	depends on ATA_ACPI && PM_RUNTIME
+	default n
+	help
+	  This option adds support for SATA Zero Power Optical Disc
+	  Drive (ZPODD). It requires support from both the ODD and the
+	  platform, and if enabled, will automatically power the ODD
+	  on/off when certain conditions are satisfied. This does not
+	  impact the end user's experience of the ODD; power is simply
+	  saved when the ODD is not in use (i.e. no disc inside).
+
+	  If unsure, say N.
+
+config SATA_PMP
+	bool "SATA Port Multiplier support"
+	default y
+	help
+	  This option adds support for SATA Port Multipliers
+	  (the SATA version of an Ethernet hub, or SAS expander).
+
+comment "Controllers with non-SFF native interface"
+
 config SATA_AHCI
 	tristate "AHCI SATA support"
 	depends on PCI
@@ -26,42 +89,117 @@ config SATA_AHCI
 
 	  If unsure, say N.
 
-config SATA_SVW
-	tristate "ServerWorks Frodo / Apple K2 SATA support"
-	depends on PCI
+config SATA_AHCI_PLATFORM
+	tristate "Platform AHCI SATA support"
 	help
-	  This option enables support for Broadcom/Serverworks/Apple K2
-	  SATA support.
+	  This option enables support for Platform AHCI Serial ATA
+	  controllers.
 
 	  If unsure, say N.
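The ATA_ACPI entry above mentions retrieving taskfiles from the ACPI BIOS. Concretely, that data comes from the ACPI _GTF (Get Task File) method, which returns a buffer of 7-byte register blocks to be replayed to the drive; libata keeps the same shape in its struct ata_acpi_gtf. A minimal standalone sketch of that layout, with illustrative names that are not libata's:

#include <stdint.h>
#include <stddef.h>

/* One _GTF entry: seven bytes that map onto the SFF taskfile
 * registers 0x1f1-0x1f7, in this order. */
struct gtf_cmd {
	uint8_t feature;	/* 0x1f1 */
	uint8_t nsect;		/* 0x1f2 */
	uint8_t lbal;		/* 0x1f3 */
	uint8_t lbam;		/* 0x1f4 */
	uint8_t lbah;		/* 0x1f5 */
	uint8_t device;		/* 0x1f6 */
	uint8_t command;	/* 0x1f7 */
};

/* A raw _GTF buffer is just N such entries back to back. */
static size_t gtf_entries(size_t buf_len)
{
	return buf_len / sizeof(struct gtf_cmd);	/* sizeof == 7 */
}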
-config ATA_PIIX
-	tristate "Intel PIIX/ICH SATA support"
-	depends on PCI
+config AHCI_DA850
+	tristate "DaVinci DA850 AHCI SATA support"
+	depends on ARCH_DAVINCI_DA850
 	help
-	  This option enables support for ICH5/6/7/8 Serial ATA.
-	  If PATA support was enabled previously, this enables
-	  support for select Intel PIIX/ICH PATA host controllers.
+	  This option enables support for the DaVinci DA850 SoC's
+	  onboard AHCI SATA.
 
 	  If unsure, say N.
 
-config SATA_MV
-	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+config AHCI_ST
+	tristate "ST AHCI SATA support"
+	depends on ARCH_STI
 	help
-	  This option enables support for the Marvell Serial ATA family.
-	  Currently supports 88SX[56]0[48][01] chips.
+	  This option enables support for the ST AHCI SATA controller.
 
 	  If unsure, say N.
 
-config SATA_NV
-	tristate "NVIDIA SATA support"
+config AHCI_IMX
+	tristate "Freescale i.MX AHCI SATA support"
+	depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
+	help
+	  This option enables support for the Freescale i.MX SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_MVEBU
+	tristate "Marvell EBU AHCI SATA support"
+	depends on ARCH_MVEBU
+	help
+	  This option enables support for the Marvell EBU SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_SUNXI
+	tristate "Allwinner sunxi AHCI SATA support"
+	depends on ARCH_SUNXI
+	help
+	  This option enables support for the Allwinner sunxi SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_XGENE
+	tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
+	depends on PHY_XGENE
+	help
+	  This option enables support for the APM X-Gene SoC SATA host controller.
+
+config SATA_FSL
+	tristate "Freescale 3.0Gbps SATA support"
+	depends on FSL_SOC
+	help
+	  This option enables support for the Freescale 3.0Gbps SATA controller.
+	  It can be found on MPC837x and MPC8315.
+
+	  If unsure, say N.
+
+config SATA_INIC162X
+	tristate "Initio 162x SATA support (Very Experimental)"
 	depends on PCI
 	help
-	  This option enables support for NVIDIA Serial ATA.
+	  This option enables support for Initio 162x Serial ATA.
+
+config SATA_ACARD_AHCI
+	tristate "ACard AHCI variant (ATP 8620)"
+	depends on PCI
+	help
+	  This option enables support for the ACard ATP8620 AHCI-like
+	  SATA controller.
+
+	  If unsure, say N.
+
+config SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
 
 	  If unsure, say N.
 
+config ATA_SFF
+	bool "ATA SFF support (for legacy IDE and PATA)"
+	default y
+	help
+	  This option adds support for ATA controllers with SFF
+	  compliant or similar programming interface.
+
+	  SFF is the legacy IDE interface that has been around since
+	  the dawn of time. Almost all PATA controllers have an
+	  SFF interface. Many SATA controllers have an SFF interface
+	  when configured into a legacy compatibility mode.
+
+	  If you have only modern controllers such as AHCI,
+	  Silicon Image 3124, or Marvell 6440, you may choose to
+	  disable this unneeded SFF support.
+
+	  If unsure, say Y.
+
+if ATA_SFF
+
+comment "SFF controllers with custom DMA interface"
+
 config PDC_ADMA
 	tristate "Pacific Digital ADMA support"
 	depends on PCI
@@ -70,6 +208,15 @@ config PDC_ADMA
 
 	  If unsure, say N.
 
+config PATA_OCTEON_CF
+	tristate "OCTEON Boot Bus Compact Flash support"
+	depends on CAVIUM_OCTEON_SOC
+	help
+	  This option enables a polled compact flash driver for use with
+	  compact flash cards attached to the OCTEON boot bus.
+
+	  If unsure, say N.
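The ATA_SFF entry above is terse about what "SFF" actually is: a bank of byte-wide taskfile registers at fixed legacy addresses (0x1f0-0x1f7 plus a control port for the primary channel), driven by polling a status register. A rough, self-contained sketch of that register map and the usual busy-wait; the names and the read callback are illustrative, not kernel APIs:

/* Classic SFF taskfile block, primary channel legacy I/O ports. */
enum {
	SFF_REG_DATA	= 0x1f0,	/* 16-bit PIO data window        */
	SFF_REG_ERR	= 0x1f1,	/* read: error / write: feature  */
	SFF_REG_NSECT	= 0x1f2,	/* sector count                  */
	SFF_REG_LBAL	= 0x1f3,	/* LBA low                       */
	SFF_REG_LBAM	= 0x1f4,	/* LBA mid                       */
	SFF_REG_LBAH	= 0x1f5,	/* LBA high                      */
	SFF_REG_DEVICE	= 0x1f6,	/* device/head select            */
	SFF_REG_STATUS	= 0x1f7,	/* read: status / write: command */
	SFF_REG_CTL	= 0x3f6,	/* altstatus / device control    */
};

#define SFF_ST_BSY	0x80		/* device busy  */
#define SFF_ST_DRDY	0x40		/* device ready */

/* Poll until the device is ready; 'rd' stands in for inb()/ioread8(). */
static int sff_wait_ready(unsigned char (*rd)(unsigned short port),
			  unsigned long tries)
{
	while (tries--) {
		unsigned char status = rd(SFF_REG_STATUS);

		if (!(status & SFF_ST_BSY) && (status & SFF_ST_DRDY))
			return 0;
	}
	return -1;			/* timed out */
}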
+
 config SATA_QSTOR
 	tristate "Pacific Digital SATA QStor support"
 	depends on PCI
@@ -78,6 +225,97 @@ config SATA_QSTOR
 
 	  If unsure, say N.
 
+config SATA_SX4
+	tristate "Promise SATA SX4 support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for Promise Serial ATA SX4.
+
+	  If unsure, say N.
+
+config ATA_BMDMA
+	bool "ATA BMDMA support"
+	default y
+	help
+	  This option adds support for SFF ATA controllers with BMDMA
+	  capability. BMDMA stands for bus-master DMA and is the
+	  de facto DMA interface for SFF controllers.
+
+	  If unsure, say Y.
+
+if ATA_BMDMA
+
+comment "SATA SFF controllers with BMDMA"
+
+config ATA_PIIX
+	tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
+	depends on PCI
+	help
+	  This option enables support for ICH5/6/7/8 Serial ATA
+	  and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
+	  host controllers.
+
+	  If unsure, say N.
+
+config SATA_DWC
+	tristate "DesignWare Cores SATA support"
+	depends on 460EX
+	help
+	  This option enables support for the on-chip SATA controller of the
+	  AppliedMicro processor 460EX.
+
+	  If unsure, say N.
+
+config SATA_DWC_DEBUG
+	bool "Debugging driver version"
+	depends on SATA_DWC
+	help
+	  This option enables debugging output in the driver.
+
+config SATA_DWC_VDEBUG
+	bool "Verbose debug output"
+	depends on SATA_DWC_DEBUG
+	help
+	  This option enables taskfile dumping and NCQ debugging.
+
+config SATA_DWC_PMP
+	tristate "DesignWare Cores SATA with PMP support"
+	depends on 460EX
+	help
+	  This option enables support for the on-chip SATA controller of the
+	  AppliedMicro processor 460EX with PMP support.
+
+	  If unsure, say N.
+
+config SATA_HIGHBANK
+	tristate "Calxeda Highbank SATA support"
+	depends on ARCH_HIGHBANK || COMPILE_TEST
+	help
+	  This option enables support for the Calxeda Highbank SoC's
+	  onboard SATA.
+
+	  If unsure, say N.
+
+config SATA_MV
+	tristate "Marvell SATA support"
+	depends on PCI || ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
+	select GENERIC_PHY
+	help
+	  This option enables support for the Marvell Serial ATA family.
+	  Currently supports 88SX[56]0[48][01] PCI(-X) chips,
+	  as well as the newer [67]042 PCI-X/PCIe and SOC devices.
+
+	  If unsure, say N.
+
+config SATA_NV
+	tristate "NVIDIA SATA support"
+	depends on PCI
+	help
+	  This option enables support for NVIDIA Serial ATA.
+
+	  If unsure, say N.
+
 config SATA_PROMISE
 	tristate "Promise SATA TX2/TX4 support"
 	depends on PCI
@@ -86,11 +324,11 @@ config SATA_PROMISE
 
 	  If unsure, say N.
 
-config SATA_SX4
-	tristate "Promise SATA SX4 support"
-	depends on PCI && EXPERIMENTAL
+config SATA_RCAR
+	tristate "Renesas R-Car SATA support"
+	depends on ARCH_SHMOBILE || COMPILE_TEST
 	help
-	  This option enables support for Promise Serial ATA SX4.
+	  This option enables support for Renesas R-Car Serial ATA.
 
 	  If unsure, say N.
 
@@ -102,19 +340,23 @@ config SATA_SIL
 
 	  If unsure, say N.
 
-config SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
+config SATA_SIS
+	tristate "SiS 964/965/966/180 SATA support"
 	depends on PCI
+	select PATA_SIS
 	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
+	  This option enables support for SiS Serial ATA on
+	  SiS 964/965/966/180 and Parallel ATA on SiS 180.
+	  PATA support for the SiS 180 additionally requires
+	  enabling the PATA_SIS driver in the config.
 	  If unsure, say N.
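ATA_BMDMA above notes that BMDMA is the de facto DMA interface for SFF controllers. The engine, specified in SFF-8038i, walks a driver-built table of 8-byte physical region descriptors (PRDs) until it hits an end-of-table bit; libata models the same entry as struct ata_bmdma_prd. A sketch under those assumptions, with hypothetical helper naming:

#include <stdint.h>

/*
 * One SFF-8038i physical region descriptor: the DMA engine reads
 * these 8-byte entries until it sees one with the EOT bit set.
 */
struct bmdma_prd {
	uint32_t addr;		/* physical base address, must be even    */
	uint32_t flags_len;	/* bits 0-15: byte count (0 means 64 KiB),
				   bit 31: end of table (EOT)             */
};

#define BMDMA_PRD_EOT	0x80000000u

/* Describe one contiguous buffer; larger transfers need more entries. */
static void bmdma_fill_prd(struct bmdma_prd *prd,
			   uint32_t phys, uint32_t len)
{
	prd->addr = phys;
	/* A full 64 KiB segment is encoded as a zero count. */
	prd->flags_len = (len & 0xffffu) | BMDMA_PRD_EOT;
}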
-config SATA_SIS
-	tristate "SiS 964/180 SATA support"
+config SATA_SVW
+	tristate "ServerWorks Frodo / Apple K2 SATA support"
 	depends on PCI
 	help
-	  This option enables support for SiS Serial ATA 964/180.
+	  This option enables support for the Broadcom/ServerWorks/Apple K2
+	  SATA controller.
 
 	  If unsure, say N.
 
@@ -142,14 +384,11 @@ config SATA_VITESSE
 
 	  If unsure, say N.
 
-config SATA_INTEL_COMBINED
-	bool
-	depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
-	default y
+comment "PATA SFF controllers with BMDMA"
 
 config PATA_ALI
-	tristate "ALi PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ALi PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ALi ATA interfaces
 	  found on the many ALi chipsets.
@@ -157,7 +396,7 @@ config PATA_ALI
 	  If unsure, say N.
 
 config PATA_AMD
-	tristate "AMD/NVidia PATA support (Experimental)"
+	tristate "AMD/NVidia PATA support"
 	depends on PCI
 	help
 	  This option enables support for the AMD and NVidia PATA
@@ -165,26 +404,52 @@ config PATA_AMD
 
 	  If unsure, say N.
 
+config PATA_ARASAN_CF
+	tristate "ARASAN CompactFlash PATA Controller Support"
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
+	depends on DMADEVICES
+	select DMA_ENGINE
+	help
+	  Say Y here to support the ARASAN CompactFlash PATA controller.
+
 config PATA_ARTOP
-	tristate "ARTOP 6210/6260 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ARTOP 6210/6260 PATA support"
+	depends on PCI
 	help
 	  This option enables support for ARTOP PATA controllers.
 
 	  If unsure, say N.
 
 config PATA_ATIIXP
-	tristate "ATI PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ATI PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ATI ATA interfaces
 	  found on the many ATI chipsets.
 
 	  If unsure, say N.
 
+config PATA_ATP867X
+	tristate "ARTOP/Acard ATP867X PATA support"
+	depends on PCI
+	help
+	  This option enables support for ARTOP/Acard ATP867X PATA
+	  controllers.
+
+	  If unsure, say N.
+
+config PATA_BF54X
+	tristate "Blackfin 54x ATAPI support"
+	depends on BF542 || BF548 || BF549
+	help
+	  This option enables support for the built-in ATAPI controller on
+	  Blackfin 54x family chips.
+
+	  If unsure, say N.
+
 config PATA_CMD64X
-	tristate "CMD64x PATA support (Very Experimental)"
-	depends on PCI&& EXPERIMENTAL
+	tristate "CMD64x PATA support"
+	depends on PCI
 	help
 	  This option enables support for the CMD64x series chips
 	  except for the CMD640.
@@ -193,7 +458,7 @@ config PATA_CMD64X
 
 config PATA_CS5520
 	tristate "CS5510/5520 PATA support"
-	depends on PCI
+	depends on PCI && (X86_32 || COMPILE_TEST)
 	help
 	  This option enables support for the Cyrix 5510/5520
 	  companion chip used with the MediaGX/Geode processor family.
@@ -201,8 +466,8 @@ config PATA_CS5520
 	  If unsure, say N.
 
 config PATA_CS5530
-	tristate "CS5530 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "CS5530 PATA support"
+	depends on PCI && (X86_32 || COMPILE_TEST)
 	help
 	  This option enables support for the Cyrix/NatSemi/AMD CS5530
 	  companion chip used with the MediaGX/Geode processor family.
@@ -211,16 +476,25 @@ config PATA_CS5530
 
 config PATA_CS5535
 	tristate "CS5535 PATA support (Experimental)"
-	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	depends on PCI && X86_32
 	help
 	  This option enables support for the NatSemi/AMD CS5535
 	  companion chip used with the Geode processor family.
 
 	  If unsure, say N.
+config PATA_CS5536
+	tristate "CS5536 PATA support"
+	depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
+	help
+	  This option enables support for the AMD CS5536
+	  companion chip used with the Geode LX processor family.
+
+	  If unsure, say N.
+
 config PATA_CYPRESS
 	tristate "Cypress CY82C693 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  This option enables support for the Cypress/Contaq CY82C693
 	  chipset found in some Alpha systems.
@@ -236,18 +510,18 @@ config PATA_EFAR
 
 	  If unsure, say N.
 
-config ATA_GENERIC
-	tristate "Generic ATA support"
-	depends on PCI
+config PATA_EP93XX
+	tristate "Cirrus Logic EP93xx PATA support"
+	depends on ARCH_EP93XX
 	help
-	  This option enables support for generic BIOS configured
-	  ATA controllers via the new ATA layer
+	  This option enables support for the PATA controller in
+	  the Cirrus Logic EP9312 and EP9315 ARM CPUs.
 
 	  If unsure, say N.
 
 config PATA_HPT366
-	tristate "HPT 366/368 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "HPT 366/368 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the HPT 366 and 368
 	  PATA controllers via the new ATA layer.
@@ -255,8 +529,8 @@ config PATA_HPT366
 	  If unsure, say N.
 
 config PATA_HPT37X
-	tristate "HPT 370/370A/371/372/374/302 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "HPT 370/370A/371/372/374/302 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the majority of the later HPT
 	  PATA controllers via the new ATA layer.
@@ -264,16 +538,16 @@ config PATA_HPT37X
 	  If unsure, say N.
 
 config PATA_HPT3X2N
-	tristate "HPT 372N/302N PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "HPT 371N/372N/302N PATA support"
+	depends on PCI
 	help
 	  This option enables support for the N variant HPT PATA
-	  controllers via the new ATA layer
+	  controllers via the new ATA layer.
 
 	  If unsure, say N.
 
 config PATA_HPT3X3
-	tristate "HPT 343/363 PATA support (Experimental)"
+	tristate "HPT 343/363 PATA support"
 	depends on PCI
 	help
 	  This option enables support for the HPT 343/363
@@ -281,18 +555,43 @@ config PATA_HPT3X3
 
 	  If unsure, say N.
 
-config PATA_ISAPNP
-	tristate "ISA Plug and Play PATA support (Very Experimental)"
-	depends on EXPERIMENTAL && ISAPNP
+config PATA_HPT3X3_DMA
+	bool "HPT 343/363 DMA support"
+	depends on PATA_HPT3X3
 	help
-	  This option enables support for ISA plug & play ATA
-	  controllers such as those found on old soundcards.
+	  This option enables DMA support for the HPT343/363
+	  controllers. Enable with care as there are still some
+	  problems with DMA on this chipset.
+
+config PATA_ICSIDE
+	tristate "Acorn ICS PATA support"
+	depends on ARM && ARCH_ACORN
+	help
+	  On Acorn systems, say Y here if you wish to use the ICS PATA
+	  interface card. This is not required for ICS partition support.
+	  If you are unsure, say N to this.
+
+config PATA_IMX
+	tristate "PATA support for Freescale iMX"
+	depends on ARCH_MXC
+	help
+	  This option enables support for the PATA host available on Freescale
+	  iMX SoCs.
+
+	  If unsure, say N.
+
+config PATA_IT8213
+	tristate "IT8213 PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the ITE 8213 PATA
+	  controllers via the new ATA layer.
 
 	  If unsure, say N.
config PATA_IT821X
-	tristate "IT821x PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "IT8211/2 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ITE 8211 and 8212
 	  PATA controllers via the new ATA layer, including RAID
@@ -309,37 +608,33 @@ config PATA_JMICRON
 
 	  If unsure, say N.
 
-config PATA_LEGACY
-	tristate "Legacy ISA PATA support (Experimental)"
-	depends on ISA && EXPERIMENTAL
+config PATA_MACIO
+	tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+	depends on PPC_PMAC
 	help
-	  This option enables support for ISA/VLB bus legacy PATA
-	  ports and allows them to be accessed via the new ATA layer.
+	  Most IDE capable PowerMacs have IDE busses driven by a variant
+	  of this controller which is part of the Apple chipset used on
+	  most PowerMac models. Some models have multiple busses using
+	  different chipsets, though generally, MacIO is one of them.
 
-	  If unsure, say N.
-
-config PATA_TRIFLEX
-	tristate "Compaq Triflex PATA support"
+config PATA_MARVELL
+	tristate "Marvell PATA support via legacy mode"
 	depends on PCI
 	help
-	  Enable support for the Compaq 'Triflex' IDE controller as found
-	  on many Compaq Pentium-Pro systems, via the new ATA layer.
+	  This option enables limited support for the Marvell 88SE61xx ATA
+	  controllers. If you wish to use only the SATA ports then select
+	  the AHCI driver alone. If you wish to use the PATA port or
+	  both SATA and PATA include this driver.
 
 	  If unsure, say N.
 
-config PATA_MPIIX
-	tristate "Intel PATA MPIIX support"
-	depends on PCI
+config PATA_MPC52xx
+	tristate "Freescale MPC52xx SoC internal IDE"
+	depends on PPC_MPC52xx && PPC_BESTCOMM
+	select PPC_BESTCOMM_ATA
 	help
-	  This option enables support for MPIIX PATA support.
-
-	  If unsure, say N.
-
-config PATA_OLDPIIX
-	tristate "Intel PATA old PIIX support (Experimental)"
-	depends on PCI && EXPERIMENTAL
-	help
-	  This option enables support for old(?) PIIX PATA support.
+	  This option enables support for the integrated IDE controller
+	  of the Freescale MPC52xx SoC.
 
 	  If unsure, say N.
 
@@ -352,99 +647,111 @@ config PATA_NETCELL
 
 	  If unsure, say N.
 
-config PATA_NS87410
-	tristate "Nat Semi NS87410 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+config PATA_NINJA32
+	tristate "Ninja32/Delkin Cardbus ATA support"
+	depends on PCI
+	help
+	  This option enables support for the Ninja32, Delkin and
+	  possibly other brands of Cardbus ATA adapter.
+
+	  If unsure, say N.
+
+config PATA_NS87415
+	tristate "Nat Semi NS87415 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the National Semiconductor
-	  NS87410 PCI-IDE controller.
+	  NS87415 PCI-IDE controller.
 
 	  If unsure, say N.
 
-config PATA_OPTI
-	tristate "OPTI621/6215 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+config PATA_OLDPIIX
+	tristate "Intel PATA old PIIX support"
+	depends on PCI
 	help
-	  This option enables full PIO support for the early Opti ATA
-	  controllers found on some old motherboards.
+	  This option enables support for early PIIX PATA controllers.
 
 	  If unsure, say N.
 
 config PATA_OPTIDMA
-	tristate "OPTI FireStar PATA support (Veyr Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "OPTI FireStar PATA support (Very Experimental)"
+	depends on PCI
 	help
 	  This option enables DMA/PIO support for the later OPTi
 	  controllers found on some old motherboards and in some
-	  latops
+	  laptops.
 
 	  If unsure, say N.
-config PATA_PCMCIA
-	tristate "PCMCIA PATA support"
-	depends on PCMCIA
+config PATA_PDC2027X
+	tristate "Promise PATA 2027x support"
+	depends on PCI
 	help
-	  This option enables support for PCMCIA ATA interfaces, including
-	  compact flash card adapters via the new ATA layer.
+	  This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
 
 	  If unsure, say N.
 
 config PATA_PDC_OLD
-	tristate "Older Promise PATA controller support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Older Promise PATA controller support"
+	depends on PCI
 	help
 	  This option enables support for the Promise 20246, 20262, 20263,
 	  20265 and 20267 adapters.
 
 	  If unsure, say N.
 
-config PATA_QDI
-	tristate "QDI VLB PATA support"
-	depends on ISA
-	help
-	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-
 config PATA_RADISYS
-	tristate "RADISYS 82600 PATA support (Very experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "RADISYS 82600 PATA support (Experimental)"
+	depends on PCI
 	help
 	  This option enables support for the RADISYS 82600
 	  PATA controllers via the new ATA layer.
 
 	  If unsure, say N.
 
-config PATA_RZ1000
-	tristate "PC Tech RZ1000 PATA support"
+config PATA_RDC
+	tristate "RDC PATA support"
 	depends on PCI
 	help
-	  This option enables basic support for the PC Tech RZ1000/1
-	  PATA controllers via the new ATA layer
+	  This option enables basic support for the later RDC PATA
+	  controllers via the new ATA layer. For the RDC 1010, you need to
+	  enable the IT821X driver instead.
 
 	  If unsure, say N.
 
 config PATA_SC1200
-	tristate "SC1200 PATA support (Raving Lunatic)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SC1200 PATA support"
+	depends on PCI && (X86_32 || COMPILE_TEST)
 	help
 	  This option enables support for the NatSemi/AMD SC1200 SoC
 	  companion chip used with the Geode processor family.
 
 	  If unsure, say N.
 
-config PATA_SERVERWORKS
-	tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+config PATA_SCC
+	tristate "Toshiba's Cell Reference Set IDE support"
+	depends on PCI && PPC_CELLEB
 	help
-	  This option enables support for the Serverworks OSB4/CSB5/CSB6 and
-	  HT1000 PATA controllers, via the new ATA layer.
+	  This option enables support for the built-in IDE controller on
+	  Toshiba Cell Reference Board.
 
 	  If unsure, say N.
 
-config PATA_PDC2027X
-	tristate "Promise PATA 2027x support"
+config PATA_SCH
+	tristate "Intel SCH PATA support"
 	depends on PCI
 	help
-	  This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
+	  This option enables support for Intel SCH PATA on the Intel
+	  SCH (US15W, US15L, UL11L) series host controllers.
+
+	  If unsure, say N.
+
+config PATA_SERVERWORKS
+	tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the Serverworks OSB4/CSB5/CSB6 and
+	  HT1000 PATA controllers, via the new ATA layer.
 
 	  If unsure, say N.
 
@@ -457,13 +764,31 @@ config PATA_SIL680
 
 	  If unsure, say N.
 
 config PATA_SIS
-	tristate "SiS PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SiS PATA support"
+	depends on PCI
 	help
 	  This option enables support for SiS PATA controllers.
 
 	  If unsure, say N.
 
+config PATA_TOSHIBA
+	tristate "Toshiba Piccolo support (Experimental)"
+	depends on PCI
+	help
+	  Support for the Toshiba Piccolo controllers. Currently only the
+	  primary channel is supported by this driver.
+
+	  If unsure, say N.
+
+config PATA_TRIFLEX
+	tristate "Compaq Triflex PATA support"
+	depends on PCI
+	help
+	  Enable support for the Compaq 'Triflex' IDE controller as found
+	  on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+	  If unsure, say N.
+
 config PATA_VIA
 	tristate "VIA PATA support"
 	depends on PCI
@@ -473,6 +798,17 @@ config PATA_VIA
 
 	  If unsure, say N.
 
+config PATA_PXA
+	tristate "PXA DMA-capable PATA support"
+	depends on ARCH_PXA
+	help
+	  This option enables support for a hard drive attached to the
+	  PXA CPU's bus.
+
+	  NOTE: This driver uses the PXA DMA controller. If your hardware
+	  is not capable of MWDMA, use pata_platform instead.
+
+	  If unsure, say N.
+
 config PATA_WINBOND
 	tristate "Winbond SL82C105 PATA support"
 	depends on PCI
@@ -482,6 +818,189 @@ config PATA_WINBOND
 
 	  If unsure, say N.
 
-endif
-endmenu
+endif # ATA_BMDMA
+
+comment "PIO-only SFF controllers"
+
+config PATA_AT32
+	tristate "Atmel AVR32 PATA support (Experimental)"
+	depends on AVR32 && PLATFORM_AT32AP
+	help
+	  This option enables support for the IDE devices on the
+	  Atmel AT32AP platform.
+
+	  If unsure, say N.
+
+config PATA_AT91
+	tristate "PATA support for AT91SAM9260"
+	depends on ARM && SOC_AT91SAM9
+	help
+	  This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+	  If unsure, say N.
+
+config PATA_CMD640_PCI
+	tristate "CMD640 PCI PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the CMD640 PCI IDE
+	  interface chip. Only the primary channel is currently
+	  supported.
+
+	  If unsure, say N.
+
+config PATA_ISAPNP
+	tristate "ISA Plug and Play PATA support"
+	depends on ISAPNP
+	help
+	  This option enables support for ISA plug & play ATA
+	  controllers such as those found on old soundcards.
+
+	  If unsure, say N.
+
+config PATA_IXP4XX_CF
+	tristate "IXP4XX Compact Flash support"
+	depends on ARCH_IXP4XX
+	help
+	  This option enables support for a Compact Flash connected on
+	  the ixp4xx expansion bus. This driver was written with
+	  Loft/Avila boards in mind but can work with others.
+
+	  If unsure, say N.
+
+config PATA_MPIIX
+	tristate "Intel PATA MPIIX support"
+	depends on PCI
+	help
+	  This option enables support for the Intel MPIIX PATA controller.
+
+	  If unsure, say N.
+
+config PATA_NS87410
+	tristate "Nat Semi NS87410 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the National Semiconductor
+	  NS87410 PCI-IDE controller.
+
+	  If unsure, say N.
+
+config PATA_OPTI
+	tristate "OPTI621/6215 PATA support (Very Experimental)"
+	depends on PCI
+	help
+	  This option enables full PIO support for the early Opti ATA
+	  controllers found on some old motherboards.
+
+	  If unsure, say N.
+
+config PATA_PALMLD
+	tristate "Palm LifeDrive PATA support"
+	depends on MACH_PALMLD
+	help
+	  This option enables support for Palm LifeDrive's internal ATA
+	  port via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PCMCIA
+	tristate "PCMCIA PATA support"
+	depends on PCMCIA
+	help
+	  This option enables support for PCMCIA ATA interfaces, including
+	  compact flash card adapters via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PLATFORM
+	tristate "Generic platform device PATA support"
+	depends on EXPERT || PPC || HAVE_PATA_PLATFORM
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems.
+
+	  If unsure, say N.
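PATA_PLATFORM above covers devices that need nothing from a board file beyond a command-block window, a control register, and an IRQ, all handed over as ordinary platform resources. A rough board-file sketch; the addresses, IRQ number, and register spacing are invented for illustration, and the pata_platform_info/ioport_shift interface is assumed from <linux/ata_platform.h>:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>

/* Hypothetical board: CF taskfile window at 0x20000000, control
 * register at 0x2000001c, wired to IRQ 59. */
static struct resource board_ide_resources[] = {
	{	/* command block registers */
		.start	= 0x20000000,
		.end	= 0x2000000f,
		.flags	= IORESOURCE_MEM,
	},
	{	/* control/altstatus register */
		.start	= 0x2000001c,
		.end	= 0x2000001f,
		.flags	= IORESOURCE_MEM,
	},
	{	/* interrupt line */
		.start	= 59,
		.end	= 59,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct pata_platform_info board_ide_info = {
	.ioport_shift	= 1,	/* registers on 16-bit boundaries */
};

static struct platform_device board_ide_device = {
	.name		= "pata_platform",
	.id		= -1,
	.resource	= board_ide_resources,
	.num_resources	= ARRAY_SIZE(board_ide_resources),
	.dev		= {
		.platform_data = &board_ide_info,
	},
};

/* Registered from the board's init code with
 * platform_device_register(&board_ide_device). */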
+
+config PATA_OF_PLATFORM
+	tristate "OpenFirmware platform device PATA support"
+	depends on PATA_PLATFORM && OF
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems with OpenFirmware
+	  bindings.
+
+	  If unsure, say N.
+
+config PATA_QDI
+	tristate "QDI VLB PATA support"
+	depends on ISA
+	select PATA_LEGACY
+	help
+	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
+
+config PATA_RB532
+	tristate "RouterBoard 532 PATA CompactFlash support"
+	depends on MIKROTIK_RB532
+	help
+	  This option enables support for the RouterBoard 532
+	  PATA CompactFlash controller.
+
+	  If unsure, say N.
+
+config PATA_RZ1000
+	tristate "PC Tech RZ1000 PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the PC Tech RZ1000/1
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_SAMSUNG_CF
+	tristate "Samsung SoC PATA support"
+	depends on SAMSUNG_DEV_IDE
+	help
+	  This option enables basic support for Samsung's S3C/S5P board
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_WINBOND_VLB
+	tristate "Winbond W83759A VLB PATA support (Experimental)"
+	depends on ISA
+	select PATA_LEGACY
+	help
+	  Support for the Winbond W83759A controller on VESA Local Bus
+	  systems.
+
+comment "Generic fallback / legacy drivers"
+
+config PATA_ACPI
+	tristate "ACPI firmware driver for PATA"
+	depends on ATA_ACPI && ATA_BMDMA
+	help
+	  This option enables an ACPI method driver which drives
+	  motherboard PATA controller interfaces through the ACPI
+	  firmware in the BIOS. This driver can sometimes handle
+	  otherwise unsupported hardware.
+
+config ATA_GENERIC
+	tristate "Generic ATA support"
+	depends on PCI && ATA_BMDMA
+	help
+	  This option enables support for generic BIOS configured
+	  ATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_LEGACY
+	tristate "Legacy ISA PATA support (Experimental)"
+	depends on (ISA || PCI)
+	help
+	  This option enables support for ISA/VLB/PCI bus legacy PATA
+	  ports and allows them to be accessed via the new ATA layer.
+
+	  If unsure, say N.
+endif # ATA_SFF
+endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 72243a677f9..7e7a77de757 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,62 +1,118 @@
 obj-$(CONFIG_ATA)		+= libata.o
 
-obj-$(CONFIG_SATA_AHCI)		+= ahci.o
-obj-$(CONFIG_SATA_SVW)		+= sata_svw.o
+# non-SFF interface
+obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
+obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
+obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
+obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
+obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
+obj-$(CONFIG_SATA_DWC)		+= sata_dwc_460ex.o
+obj-$(CONFIG_SATA_DWC_PMP)	+= sata_dwc_pmp.o
+obj-$(CONFIG_SATA_HIGHBANK)	+= sata_highbank.o libahci.o
+obj-$(CONFIG_AHCI_DA850)	+= ahci_da850.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_IMX)		+= ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MVEBU)	+= ahci_mvebu.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_SUNXI)	+= ahci_sunxi.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_ST)		+= ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_XGENE)	+= ahci_xgene.o libahci.o libahci_platform.o
+
+# SFF w/ custom DMA
+obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
+obj-$(CONFIG_PATA_ARASAN_CF)	+= pata_arasan_cf.o
+obj-$(CONFIG_PATA_OCTEON_CF)	+= pata_octeon_cf.o
+obj-$(CONFIG_SATA_QSTOR)	+= sata_qstor.o
+obj-$(CONFIG_SATA_SX4)		+= sata_sx4.o
+
+# SFF SATA w/ BMDMA
 obj-$(CONFIG_ATA_PIIX)		+= ata_piix.o
+obj-$(CONFIG_SATA_MV)		+= sata_mv.o
+obj-$(CONFIG_SATA_NV)		+= sata_nv.o
 obj-$(CONFIG_SATA_PROMISE)	+= sata_promise.o
-obj-$(CONFIG_SATA_QSTOR)	+= sata_qstor.o
+obj-$(CONFIG_SATA_RCAR)		+= sata_rcar.o
 obj-$(CONFIG_SATA_SIL)		+= sata_sil.o
-obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
-obj-$(CONFIG_SATA_VIA)		+= sata_via.o
-obj-$(CONFIG_SATA_VITESSE)	+= sata_vsc.o
 obj-$(CONFIG_SATA_SIS)		+= sata_sis.o
-obj-$(CONFIG_SATA_SX4)		+= sata_sx4.o
-obj-$(CONFIG_SATA_NV)		+= sata_nv.o
+obj-$(CONFIG_SATA_SVW)		+= sata_svw.o
 obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
-obj-$(CONFIG_SATA_MV)		+= sata_mv.o
-obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
+obj-$(CONFIG_SATA_VIA)		+= sata_via.o
+obj-$(CONFIG_SATA_VITESSE)	+= sata_vsc.o
 
+# SFF PATA w/ BMDMA
 obj-$(CONFIG_PATA_ALI)		+= pata_ali.o
 obj-$(CONFIG_PATA_AMD)		+= pata_amd.o
 obj-$(CONFIG_PATA_ARTOP)	+= pata_artop.o
 obj-$(CONFIG_PATA_ATIIXP)	+= pata_atiixp.o
+obj-$(CONFIG_PATA_ATP867X)	+= pata_atp867x.o
+obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_CMD64X)	+= pata_cmd64x.o
 obj-$(CONFIG_PATA_CS5520)	+= pata_cs5520.o
 obj-$(CONFIG_PATA_CS5530)	+= pata_cs5530.o
 obj-$(CONFIG_PATA_CS5535)	+= pata_cs5535.o
+obj-$(CONFIG_PATA_CS5536)	+= pata_cs5536.o
 obj-$(CONFIG_PATA_CYPRESS)	+= pata_cypress.o
 obj-$(CONFIG_PATA_EFAR)		+= pata_efar.o
+obj-$(CONFIG_PATA_EP93XX)	+= pata_ep93xx.o
 obj-$(CONFIG_PATA_HPT366)	+= pata_hpt366.o
 obj-$(CONFIG_PATA_HPT37X)	+= pata_hpt37x.o
 obj-$(CONFIG_PATA_HPT3X2N)	+= pata_hpt3x2n.o
 obj-$(CONFIG_PATA_HPT3X3)	+= pata_hpt3x3.o
-obj-$(CONFIG_PATA_ISAPNP)	+= pata_isapnp.o
+obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+obj-$(CONFIG_PATA_IMX)		+= pata_imx.o
+obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
 obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
 obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
+obj-$(CONFIG_PATA_MACIO)	+= pata_macio.o
+obj-$(CONFIG_PATA_MARVELL)	+= pata_marvell.o
+obj-$(CONFIG_PATA_MPC52xx)	+= pata_mpc52xx.o
 obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
-obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
-obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
-obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
-obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
+obj-$(CONFIG_PATA_NINJA32)	+= pata_ninja32.o
+obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
 obj-$(CONFIG_PATA_OLDPIIX)	+= pata_oldpiix.o
-obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
+obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
 obj-$(CONFIG_PATA_PDC2027X)	+= pata_pdc2027x.o
 obj-$(CONFIG_PATA_PDC_OLD)	+= pata_pdc202xx_old.o
-obj-$(CONFIG_PATA_QDI)		+= pata_qdi.o
 obj-$(CONFIG_PATA_RADISYS)	+= pata_radisys.o
-obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
+obj-$(CONFIG_PATA_RDC)		+= pata_rdc.o
 obj-$(CONFIG_PATA_SC1200)	+= pata_sc1200.o
+obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
 obj-$(CONFIG_PATA_SERVERWORKS)	+= pata_serverworks.o
 obj-$(CONFIG_PATA_SIL680)	+= pata_sil680.o
-obj-$(CONFIG_PATA_VIA)		+= pata_via.o
-obj-$(CONFIG_PATA_WINBOND)	+= pata_sl82c105.o
 obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
+obj-$(CONFIG_PATA_TOSHIBA)	+= pata_piccolo.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
+obj-$(CONFIG_PATA_VIA)		+= pata_via.o
+obj-$(CONFIG_PATA_WINBOND)	+= pata_sl82c105.o
+
+# SFF PIO only
+obj-$(CONFIG_PATA_AT32)		+= pata_at32.o
+obj-$(CONFIG_PATA_AT91)		+= pata_at91.o
+obj-$(CONFIG_PATA_CMD640_PCI)	+= pata_cmd640.o
+obj-$(CONFIG_PATA_ISAPNP)	+= pata_isapnp.o
+obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
+obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
+obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
+obj-$(CONFIG_PATA_PALMLD)	+= pata_palmld.o
+obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
+obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
+obj-$(CONFIG_PATA_RB532)	+= pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
+obj-$(CONFIG_PATA_SAMSUNG_CF)	+= pata_samsung_cf.o
+
+obj-$(CONFIG_PATA_PXA)		+= pata_pxa.o
+
+# Should be last but two libata driver
+obj-$(CONFIG_PATA_ACPI)		+= pata_acpi.o
 # Should be last but one libata driver
 obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
 # Should be last libata driver
 obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
 
-libata-objs :=	libata-core.o libata-scsi.o libata-sff.o libata-eh.o
-
+libata-y	:= libata-core.o libata-scsi.o libata-eh.o libata-transport.o
+libata-$(CONFIG_ATA_SFF)	+= libata-sff.o
+libata-$(CONFIG_SATA_PMP)	+= libata-pmp.o
+libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
+libata-$(CONFIG_SATA_ZPODD)	+= libata-zpodd.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
new file mode 100644
index 00000000000..0cd7c7a39e5
--- /dev/null
+++ b/drivers/ata/acard-ahci.c
@@ -0,0 +1,511 @@
+
+/*
+ *  acard-ahci.c - ACard AHCI SATA support
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2010 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME	"acard-ahci"
+#define DRV_VERSION	"1.0"
+
+/*
+  Received FIS structure limited to 80h.
+*/
+
+#define ACARD_AHCI_RX_FIS_SZ 128
+
+enum {
+	AHCI_PCI_BAR = 5,
+};
+
+enum board_ids {
+	board_acard_ahci,
+};
+
+struct acard_sg {
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			reserved;
+	__le32			size;	/* bit 31 (EOT) max==0x10000 (64k) */
+};
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int acard_ahci_port_start(struct ata_port *ap);
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef CONFIG_PM_SLEEP
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static struct scsi_host_template acard_ahci_sht = {
+	AHCI_SHT("acard-ahci"),
+};
+
+static struct ata_port_operations acard_ops = {
+	.inherits		= &ahci_ops,
+	.qc_prep		= acard_ahci_qc_prep,
+	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
+	.port_start		= acard_ahci_port_start,
+};
+
+#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
+
+static const struct ata_port_info acard_ahci_port_info[] = {
+	[board_acard_ahci] =
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &acard_ops,
+	},
+};
+
+static const struct pci_device_id acard_ahci_pci_tbl[] = {
+	/* ACard */
+	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver acard_ahci_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= acard_ahci_pci_tbl,
+	.probe			= acard_ahci_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= acard_ahci_pci_device_suspend,
+	.resume			= acard_ahci_pci_device_resume,
+#endif
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+
+	if (mesg.event & PM_EVENT_SUSPEND &&
+	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(&pdev->dev,
+			"BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	if (mesg.event & PM_EVENT_SLEEP) {
+		/* AHCI spec rev1.1 section 8.3.3:
+		 * Software must disable interrupts prior to requesting a
+		 * transition of the HBA to D3 state.
+		 */
+		ctl = readl(mmio + HOST_CTL);
+		ctl &= ~HOST_IRQ_EN;
+		writel(ctl, mmio + HOST_CTL);
+		readl(mmio + HOST_CTL); /* flush */
+	}
+
+	return ata_pci_device_suspend(pdev, mesg);
+}
+
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+	int rc;
+
+	if (using_dac &&
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void acard_ahci_pci_print_info(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u16 cc;
+	const char *scc_s;
+
+	pci_read_config_word(pdev, 0x0a, &cc);
+	if (cc == PCI_CLASS_STORAGE_IDE)
+		scc_s = "IDE";
+	else if (cc == PCI_CLASS_STORAGE_SATA)
+		scc_s = "SATA";
+	else if (cc == PCI_CLASS_STORAGE_RAID)
+		scc_s = "RAID";
+	else
+		scc_s = "unknown";
+
+	ahci_print_info(host, scc_s);
+}
+
+static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+	struct scatterlist *sg;
+	struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+	unsigned int si, last_si = 0;
+
+	VPRINTK("ENTER\n");
+
+	/*
+	 * Next, the S/G list.
+	 */
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		/*
+		 * ACard note:
+		 * We must set an end-of-table (EOT) bit,
+		 * and the segment cannot exceed 64k (0x10000)
+		 */
+		acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+		acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		acard_sg[si].size = cpu_to_le32(sg_len);
+		last_si = si;
+	}
+
+	acard_sg[last_si].size |= cpu_to_le32(1 << 31);	/* set EOT */
+
+	return si;
+}
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = ata_is_atapi(qc->tf.protocol);
+	void *cmd_tbl;
+	u32 opts;
+	const u32 cmd_fis_len = 5; /* five dwords */
+	unsigned int n_elem;
+
+	/*
+	 * Fill in command table information.  First, the header,
+	 * a SATA Register - Host to Device command FIS.
+	 */
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+	if (is_atapi) {
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+	}
+
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+
+	/*
+	 * Fill in command slot information.
+	 *
+	 * ACard note: prd table length not filled in
+	 */
+	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *rx_fis = pp->rx_fis;
+
+	if (pp->fbs_enabled)
+		rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
+
+	/*
+	 * After a successful execution of an ATA PIO data-in command,
+	 * the device doesn't send D2H Reg FIS to update the TF and
+	 * the host should take TF and E_Status from the preceding PIO
+	 * Setup FIS.
+	 */
+	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+	    !(qc->flags & ATA_QCFLAG_FAILED)) {
+		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
+		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+	} else
+		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+	return true;
+}
+
+static int acard_ahci_port_start(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct device *dev = ap->host->dev;
+	struct ahci_port_priv *pp;
+	void *mem;
+	dma_addr_t mem_dma;
+	size_t dma_sz, rx_fis_sz;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	/* check FBS capability */
+	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = ahci_port_base(ap);
+		u32 cmd = readl(port_mmio + PORT_CMD);
+		if (cmd & PORT_CMD_FBSCP)
+			pp->fbs_supported = true;
+		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+				 ap->port_no);
+			pp->fbs_supported = true;
+		} else
+			dev_warn(dev, "port %d is not capable of FBS\n",
+				 ap->port_no);
+	}
+
+	if (pp->fbs_supported) {
+		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
+	} else {
+		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
+	}
+
+	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, dma_sz);
+
+	/*
+	 * First item in chunk of DMA memory: 32-slot command table,
+	 * 32 bytes each in size
+	 */
+	pp->cmd_slot = mem;
+	pp->cmd_slot_dma = mem_dma;
+
+	mem += AHCI_CMD_SLOT_SZ;
+	mem_dma += AHCI_CMD_SLOT_SZ;
+
+	/*
+	 * Second item: Received-FIS area
+	 */
+	pp->rx_fis = mem;
+	pp->rx_fis_dma = mem_dma;
+
+	mem += rx_fis_sz;
+	mem_dma += rx_fis_sz;
+
+	/*
+	 * Third item: data area for storing a single command
+	 * and its scatter-gather table
+	 */
+	pp->cmd_tbl = mem;
+	pp->cmd_tbl_dma = mem_dma;
+
+	/*
+	 * Save off initial list of interrupts to be enabled.
+	 * This could be changed later
+	 */
+	pp->intr_mask = DEF_PORT_IRQ;
+
+	ap->private_data = pp;
+
+	/* engage engines, captain */
+	return ahci_port_resume(ap);
+}
+
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	unsigned int board_id = ent->driver_data;
+	struct ata_port_info pi = acard_ahci_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ata_host *host;
+	int n_ports, i, rc;
+
+	VPRINTK("ENTER\n");
+
+	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* AHCI controllers often implement SFF compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
+		pci_enable_msi(pdev);
+
+	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
+	/* save initial config */
+	ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicate the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+				   0x100 + ap->port_no * 0x80, "port");
+
+		/* set initial link pm policy */
+		/*
+		ap->pm_policy = NOT_AVAILABLE;
+		*/
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	/* initialize adapter */
+	rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+	if (rc)
+		return rc;
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	ahci_init_controller(host);
+	acard_ahci_pci_print_info(host);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
+				 &acard_ahci_sht);
+}
+
+module_pci_driver(acard_ahci_pci_driver);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1aabc81d82f..4cd52a4541a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1,7 +1,7 @@
 /*
  *  ahci.c - AHCI SATA support
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
@@ -35,340 +35,447 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
+#include "ahci.h"
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"2.0"
-
+#define DRV_VERSION	"3.0"
 
 enum {
-	AHCI_PCI_BAR		= 5,
-	AHCI_MAX_SG		= 168, /* hardware max is 64K */
-	AHCI_DMA_BOUNDARY	= 0xffffffff,
-	AHCI_USE_CLUSTERING	= 0,
-	AHCI_MAX_CMDS		= 32,
-	AHCI_CMD_SZ		= 32,
-	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
-	AHCI_RX_FIS_SZ		= 256,
-	AHCI_CMD_TBL_CDB	= 0x40,
-	AHCI_CMD_TBL_HDR_SZ	= 0x80,
-	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
-	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
-	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
-				  AHCI_RX_FIS_SZ,
-	AHCI_IRQ_ON_SG		= (1 << 31),
-	AHCI_CMD_ATAPI		= (1 << 5),
-	AHCI_CMD_WRITE		= (1 << 6),
-	AHCI_CMD_PREFETCH	= (1 << 7),
-	AHCI_CMD_RESET		= (1 << 8),
-	AHCI_CMD_CLR_BUSY	= (1 << 10),
-
-	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
-	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
-
-	board_ahci		= 0,
-	board_ahci_vt8251	= 1,
-
-	/* global controller registers */
-	HOST_CAP		= 0x00, /* host capabilities */
-	HOST_CTL		= 0x04, /* global host control */
-	HOST_IRQ_STAT		= 0x08, /* interrupt status */
-	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
-	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
-
-	/* HOST_CTL bits */
-	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
-	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
-	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
-
-	/* HOST_CAP bits */
-	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
-	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
-	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
-	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
-	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
-
-	/* registers for each SATA port */
-	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
-	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
-	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
-	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
-	PORT_IRQ_STAT		= 0x10, /* interrupt status */
-	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
-	PORT_CMD		= 0x18, /* port command */
-	PORT_TFDATA		= 0x20,	/* taskfile data */
-	PORT_SIG		= 0x24, /* device TF signature */
-	PORT_CMD_ISSUE		= 0x38, /* command issue */
-	PORT_SCR		= 0x28, /* SATA phy register block */
-	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
-	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
-	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
-	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
-
-	/* PORT_IRQ_{STAT,MASK} bits */
-	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
-	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
-	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
-	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
-	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
-	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
-	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
-	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
-
-	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
-	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
-	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
-	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
-	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
-	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
-	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
-	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
-	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
-
-	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
-				  PORT_IRQ_IF_ERR |
-				  PORT_IRQ_CONNECT |
-				  PORT_IRQ_PHYRDY |
-				  PORT_IRQ_UNK_FIS,
-	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
-				  PORT_IRQ_TF_ERR |
-				  PORT_IRQ_HBUS_DATA_ERR,
-	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
-				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
-				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
-	/* PORT_CMD bits */
-	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
-	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
-	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
-	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
-	PORT_CMD_CLO		= (1 << 3), /* Command list override */
-	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
-	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
-	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
-
-	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
-	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
-	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
-	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
-
-	/* hpriv->flags bits */
-	AHCI_FLAG_MSI		= (1 << 0),
-
-	/* ap->flags bits */
-	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
-	AHCI_FLAG_NO_NCQ		= (1 << 25),
-};
-
-struct ahci_cmd_hdr {
-	u32			opts;
-	u32			status;
-	u32			tbl_addr;
-	u32			tbl_addr_hi;
-	u32			reserved[4];
-};
-
-struct ahci_sg {
-	u32			addr;
-	u32			addr_hi;
-	u32			reserved;
-	u32			flags_size;
-};
-
-struct ahci_host_priv {
-	unsigned long		flags;
-	u32			cap;	/* cache of HOST_CAP register */
-	u32			port_map; /* cache of HOST_PORTS_IMPL reg */
+	AHCI_PCI_BAR_STA2X11	= 0,
+	AHCI_PCI_BAR_ENMOTUS	= 2,
+	AHCI_PCI_BAR_STANDARD	= 5,
 };
 
-struct ahci_port_priv {
-	struct ahci_cmd_hdr	*cmd_slot;
-	dma_addr_t		cmd_slot_dma;
-	void			*cmd_tbl;
-	dma_addr_t		cmd_tbl_dma;
-	void			*rx_fis;
-	dma_addr_t		rx_fis_dma;
+enum board_ids {
+	/* board IDs by feature in alphabetical order */
+	board_ahci,
+	board_ahci_ign_iferr,
+	board_ahci_noncq,
+	board_ahci_nosntf,
+	board_ahci_yes_fbs,
+
+	/* board IDs for specific chipsets in alphabetical order */
+	board_ahci_mcp65,
+	board_ahci_mcp77,
+	board_ahci_mcp89,
+	board_ahci_mv,
+	board_ahci_sb600,
+	board_ahci_sb700,	/* for SB700 and SB800 */
+	board_ahci_vt8251,
+
+	/* aliases */
+	board_ahci_mcp_linux	= board_ahci_mcp65,
+	board_ahci_mcp67	= board_ahci_mcp65,
+	board_ahci_mcp73	= board_ahci_mcp65,
+	board_ahci_mcp79	= board_ahci_mcp77,
 };
 
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_irq_clear(struct ata_port *ap);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static u8 ahci_check_status(struct ata_port *ap);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
-static int ahci_port_resume(struct ata_port *ap);
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+				 unsigned long deadline);
+static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
+static bool is_mcp89_apple(struct pci_dev *pdev);
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+#ifdef CONFIG_PM
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
-static void ahci_remove_one (struct pci_dev *pdev);
+#endif
 
 static struct scsi_host_template ahci_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
-	.can_queue		= AHCI_MAX_CMDS - 1,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= AHCI_MAX_SG,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= AHCI_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= AHCI_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
-	.suspend		= ata_scsi_device_suspend,
-	.resume			= ata_scsi_device_resume,
+	AHCI_SHT("ahci"),
 };
 
-static const struct ata_port_operations ahci_ops = {
-	.port_disable		= ata_port_disable,
-
-	.check_status		= ahci_check_status,
-	.check_altstatus	= ahci_check_status,
-	.dev_select		= ata_noop_dev_select,
-
-	.tf_read		= ahci_tf_read,
-
-	.qc_prep		= ahci_qc_prep,
-	.qc_issue		= ahci_qc_issue,
-
-	.irq_handler		= ahci_interrupt,
-	.irq_clear		= ahci_irq_clear,
-
-	.scr_read		= ahci_scr_read,
-	.scr_write		= ahci_scr_write,
-
-	.freeze			= ahci_freeze,
-	.thaw			= ahci_thaw,
-
-	.error_handler		= ahci_error_handler,
-	.post_internal_cmd	= ahci_post_internal_cmd,
-
-	.port_suspend		= ahci_port_suspend,
-	.port_resume		= ahci_port_resume,
+static struct ata_port_operations ahci_vt8251_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_vt8251_hardreset,
+};
 
-	.port_start		= ahci_port_start,
-	.port_stop		= ahci_port_stop,
+static struct ata_port_operations ahci_p5wdh_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_p5wdh_hardreset,
 };
 
 static const struct ata_port_info ahci_port_info[] = {
-	/* board_ahci */
-	{
-		.sht		= &ahci_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_SKIP_D2H_BSY,
-		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+	/* by features */
+	[board_ahci] = {
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_ign_iferr] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_noncq] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_nosntf] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_SNTF),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_yes_fbs] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_YES_FBS),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	/* by chipsets */
+	[board_ahci_mcp65] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+				 AHCI_HFLAG_YES_NCQ),
+		.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mcp77] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mcp89] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_vt8251 */
-	{
-		.sht		= &ahci_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_SKIP_D2H_BSY |
-				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
-		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+	[board_ahci_mv] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
+	[board_ahci_sb600] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
+				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+				 AHCI_HFLAG_32BIT_ONLY),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_pmp_retry_srst_ops,
+	},
+	[board_ahci_sb700] = {	/* for SB700 and SB800 */
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_pmp_retry_srst_ops,
+	},
+	[board_ahci_vt8251] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_vt8251_ops,
+	},
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
 	/* Intel */
-	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6 */
-	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7R */
-	{ PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ULi M5288 */
-	{ PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7-M DH */
-	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-
-	/* JMicron */
-	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-
board_ahci }, /* JMicron JMB360 */ - { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* JMicron JMB361 */ - { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* JMicron JMB363 */ - { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* JMicron JMB365 */ - { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* JMicron JMB366 */ + { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ + { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ + { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ + { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */ + { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */ + { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */ + { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */ + { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ + { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ + { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ + { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ + { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */ + { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ + { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ + { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ + { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ + { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ + { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ + { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ + { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ + { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ + { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ + { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ + { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI 
*/ + { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ + { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ + { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */ + { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */ + { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ + + /* JMicron 
360/1/3/5/6, match class to avoid IDE function */ + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, + /* JMicron 362B and 362C have an AHCI function with IDE class code */ + { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, + { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr }, /* ATI */ - { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* ATI SB600 non-raid */ - { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* ATI SB600 raid */ + { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ + { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */ + { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */ + { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */ + { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */ + { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */ + { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ + + /* AMD */ + { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ + { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ + /* AMD is using RAID class only for ahci controllers */ + { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, /* VIA */ - { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci_vt8251 }, /* VIA VT8251 */ + { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ + { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ /* NVIDIA */ - { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* MCP65 */ - { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* MCP65 */ - { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* MCP65 */ - { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0583), 
board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 
}, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */ /* SiS */ - { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* SiS 966 */ - { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* SiS 966 */ - { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_ahci }, /* SiS 968 */ + { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ + { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */ + { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ + + /* ST Microelectronics */ + { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */ + + /* Marvell */ + { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ + { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123), + .class = PCI_CLASS_STORAGE_SATA_AHCI, + .class_mask = 0xffffff, + .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), + .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178, + PCI_VENDOR_ID_MARVELL_EXT, 0x9170), + .driver_data = board_ahci_yes_fbs }, /* 88se9170 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0), + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + .driver_data = board_ahci_yes_fbs }, + + /* Promise */ + { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ + + /* Asmedia */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + + /* + * Samsung SSDs found on some macbooks. NCQ times out. 
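+	 * board_ahci_noncq maps to AHCI_HFLAG_NO_NCQ in the port info
+	 * table above, so the drive stays usable, just without queued
+	 * commands.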
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60731 + */ + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, + + /* Enmotus */ + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, + + /* Generic, PCI class code for AHCI */ + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, { } /* terminate list */ }; @@ -378,1307 +485,1024 @@ static struct pci_driver ahci_pci_driver = { .name = DRV_NAME, .id_table = ahci_pci_tbl, .probe = ahci_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM .suspend = ahci_pci_device_suspend, .resume = ahci_pci_device_resume, - .remove = ahci_remove_one, +#endif }; +#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) +static int marvell_enable; +#else +static int marvell_enable = 1; +#endif +module_param(marvell_enable, int, 0644); +MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); -static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port) -{ - return base + 0x100 + (port * 0x80); -} - -static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port) -{ - return (void __iomem *) ahci_port_base_ul((unsigned long)base, port); -} - -static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) -{ - unsigned int sc_reg; - - switch (sc_reg_in) { - case SCR_STATUS: sc_reg = 0; break; - case SCR_CONTROL: sc_reg = 1; break; - case SCR_ERROR: sc_reg = 2; break; - case SCR_ACTIVE: sc_reg = 3; break; - default: - return 0xffffffffU; - } - - return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); -} - - -static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in, - u32 val) -{ - unsigned int sc_reg; - - switch (sc_reg_in) { - case SCR_STATUS: sc_reg = 0; break; - case SCR_CONTROL: sc_reg = 1; break; - case SCR_ERROR: sc_reg = 2; break; - case SCR_ACTIVE: sc_reg = 3; break; - default: - return; - } - - writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); -} - -static void ahci_start_engine(void __iomem *port_mmio) -{ - u32 tmp; - - /* start DMA */ - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_START; - writel(tmp, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* flush */ -} - -static int ahci_stop_engine(void __iomem *port_mmio) -{ - u32 tmp; - - tmp = readl(port_mmio + PORT_CMD); - - /* check if the HBA is idle */ - if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) - return 0; - - /* setting HBA to idle */ - tmp &= ~PORT_CMD_START; - writel(tmp, port_mmio + PORT_CMD); - - /* wait for engine to stop. 
This could be as long as 500 msec */ - tmp = ata_wait_register(port_mmio + PORT_CMD, - PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); - if (tmp & PORT_CMD_LIST_ON) - return -EIO; - - return 0; -} - -static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap, - dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma) -{ - u32 tmp; - - /* set FIS registers */ - if (cap & HOST_CAP_64) - writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI); - writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); - - if (cap & HOST_CAP_64) - writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI); - writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); - - /* enable FIS reception */ - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_FIS_RX; - writel(tmp, port_mmio + PORT_CMD); - - /* flush */ - readl(port_mmio + PORT_CMD); -} - -static int ahci_stop_fis_rx(void __iomem *port_mmio) -{ - u32 tmp; - - /* disable FIS reception */ - tmp = readl(port_mmio + PORT_CMD); - tmp &= ~PORT_CMD_FIS_RX; - writel(tmp, port_mmio + PORT_CMD); - - /* wait for completion, spec says 500ms, give it 1000 */ - tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, - PORT_CMD_FIS_ON, 10, 1000); - if (tmp & PORT_CMD_FIS_ON) - return -EBUSY; - - return 0; -} - -static void ahci_power_up(void __iomem *port_mmio, u32 cap) -{ - u32 cmd; - - cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; - - /* spin up device */ - if (cap & HOST_CAP_SSS) { - cmd |= PORT_CMD_SPIN_UP; - writel(cmd, port_mmio + PORT_CMD); - } - - /* wake up link */ - writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); -} - -static void ahci_power_down(void __iomem *port_mmio, u32 cap) -{ - u32 cmd, scontrol; - - cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; - - if (cap & HOST_CAP_SSC) { - /* enable transitions to slumber mode */ - scontrol = readl(port_mmio + PORT_SCR_CTL); - if ((scontrol & 0x0f00) > 0x100) { - scontrol &= ~0xf00; - writel(scontrol, port_mmio + PORT_SCR_CTL); - } - - /* put device into slumber mode */ - writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD); - - /* wait for the transition to complete */ - ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER, - PORT_CMD_ICC_SLUMBER, 1, 50); - } - - /* put device into listen mode */ - if (cap & HOST_CAP_SSS) { - /* first set PxSCTL.DET to 0 */ - scontrol = readl(port_mmio + PORT_SCR_CTL); - scontrol &= ~0xf; - writel(scontrol, port_mmio + PORT_SCR_CTL); - - /* then set PxCMD.SUD to 0 */ - cmd &= ~PORT_CMD_SPIN_UP; - writel(cmd, port_mmio + PORT_CMD); - } -} - -static void ahci_init_port(void __iomem *port_mmio, u32 cap, - dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma) -{ - /* power up */ - ahci_power_up(port_mmio, cap); - - /* enable FIS reception */ - ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma); - - /* enable DMA */ - ahci_start_engine(port_mmio); -} -static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg) +static void ahci_pci_save_initial_config(struct pci_dev *pdev, + struct ahci_host_priv *hpriv) { - int rc; + unsigned int force_port_map = 0; + unsigned int mask_port_map = 0; - /* disable DMA */ - rc = ahci_stop_engine(port_mmio); - if (rc) { - *emsg = "failed to stop engine"; - return rc; + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { + dev_info(&pdev->dev, "JMB361 has only one port\n"); + force_port_map = 1; } - /* disable FIS reception */ - rc = ahci_stop_fis_rx(port_mmio); - if (rc) { - *emsg = "failed stop FIS RX"; - return rc; + /* + * Temporary Marvell 6145 hack: 
PATA port presence + * is asserted through the standard AHCI port + * presence register, as bit 4 (counting from 0) + */ + if (hpriv->flags & AHCI_HFLAG_MV_PATA) { + if (pdev->device == 0x6121) + mask_port_map = 0x3; + else + mask_port_map = 0xf; + dev_info(&pdev->dev, + "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); } - /* put device into slumber mode */ - ahci_power_down(port_mmio, cap); - - return 0; + ahci_save_initial_config(&pdev->dev, hpriv, force_port_map, + mask_port_map); } -static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev) +static int ahci_pci_reset_controller(struct ata_host *host) { - u32 cap_save, tmp; - - cap_save = readl(mmio + HOST_CAP); - cap_save &= ( (1<<28) | (1<<17) ); - cap_save |= (1 << 27); + struct pci_dev *pdev = to_pci_dev(host->dev); - /* global controller reset */ - tmp = readl(mmio + HOST_CTL); - if ((tmp & HOST_RESET) == 0) { - writel(tmp | HOST_RESET, mmio + HOST_CTL); - readl(mmio + HOST_CTL); /* flush */ - } - - /* reset must complete within 1 second, or - * the hardware should be considered fried. - */ - ssleep(1); - - tmp = readl(mmio + HOST_CTL); - if (tmp & HOST_RESET) { - dev_printk(KERN_ERR, &pdev->dev, - "controller reset failed (0x%x)\n", tmp); - return -EIO; - } - - writel(HOST_AHCI_EN, mmio + HOST_CTL); - (void) readl(mmio + HOST_CTL); /* flush */ - writel(cap_save, mmio + HOST_CAP); - writel(0xf, mmio + HOST_PORTS_IMPL); - (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ + ahci_reset_controller(host); if (pdev->vendor == PCI_VENDOR_ID_INTEL) { + struct ahci_host_priv *hpriv = host->private_data; u16 tmp16; /* configure PCS */ pci_read_config_word(pdev, 0x92, &tmp16); - tmp16 |= 0xf; - pci_write_config_word(pdev, 0x92, tmp16); + if ((tmp16 & hpriv->port_map) != hpriv->port_map) { + tmp16 |= hpriv->port_map; + pci_write_config_word(pdev, 0x92, tmp16); + } } return 0; } -static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev, - int n_ports, u32 cap) +static void ahci_pci_init_controller(struct ata_host *host) { - int i, rc; + struct ahci_host_priv *hpriv = host->private_data; + struct pci_dev *pdev = to_pci_dev(host->dev); + void __iomem *port_mmio; u32 tmp; + int mv; - for (i = 0; i < n_ports; i++) { - void __iomem *port_mmio = ahci_port_base(mmio, i); - const char *emsg = NULL; - -#if 0 /* BIOSen initialize this incorrectly */ - if (!(hpriv->port_map & (1 << i))) - continue; -#endif - - /* make sure port is not active */ - rc = ahci_deinit_port(port_mmio, cap, &emsg); - if (rc) - dev_printk(KERN_WARNING, &pdev->dev, - "%s (%d)\n", emsg, rc); + if (hpriv->flags & AHCI_HFLAG_MV_PATA) { + if (pdev->device == 0x6121) + mv = 2; + else + mv = 4; + port_mmio = __ahci_port_base(host, mv); - /* clear SError */ - tmp = readl(port_mmio + PORT_SCR_ERR); - VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); - writel(tmp, port_mmio + PORT_SCR_ERR); + writel(0, port_mmio + PORT_IRQ_MASK); /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); - - writel(1 << i, mmio + HOST_IRQ_STAT); } - tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); - writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); - tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); -} - -static unsigned int ahci_dev_classify(struct ata_port *ap) -{ - void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; - struct ata_taskfile tf; - u32 tmp; - - tmp = readl(port_mmio + PORT_SIG); - tf.lbah = (tmp >> 24) & 
0xff; - tf.lbam = (tmp >> 16) & 0xff; - tf.lbal = (tmp >> 8) & 0xff; - tf.nsect = (tmp) & 0xff; - - return ata_dev_classify(&tf); -} - -static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, - u32 opts) -{ - dma_addr_t cmd_tbl_dma; - - cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; - - pp->cmd_slot[tag].opts = cpu_to_le32(opts); - pp->cmd_slot[tag].status = 0; - pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); - pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); + ahci_init_controller(host); } -static int ahci_clo(struct ata_port *ap) +static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) { - void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; + struct ata_port *ap = link->ap; struct ahci_host_priv *hpriv = ap->host->private_data; - u32 tmp; - - if (!(hpriv->cap & HOST_CAP_CLO)) - return -EOPNOTSUPP; - - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_CLO; - writel(tmp, port_mmio + PORT_CMD); - - tmp = ata_wait_register(port_mmio + PORT_CMD, - PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); - if (tmp & PORT_CMD_CLO) - return -EIO; - - return 0; -} - -static int ahci_prereset(struct ata_port *ap) -{ - if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) && - (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) { - /* ATA_BUSY hasn't cleared, so send a CLO */ - ahci_clo(ap); - } - - return ata_std_prereset(ap); -} - -static int ahci_softreset(struct ata_port *ap, unsigned int *class) -{ - struct ahci_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - const u32 cmd_fis_len = 5; /* five dwords */ - const char *reason = NULL; - struct ata_taskfile tf; - u32 tmp; - u8 *fis; + bool online; int rc; DPRINTK("ENTER\n"); - if (ata_port_offline(ap)) { - DPRINTK("PHY reports no device\n"); - *class = ATA_DEV_NONE; - return 0; - } - - /* prepare for SRST (AHCI-1.1 10.4.1) */ - rc = ahci_stop_engine(port_mmio); - if (rc) { - reason = "failed to stop engine"; - goto fail_restart; - } - - /* check BUSY/DRQ, perform Command List Override if necessary */ - ahci_tf_read(ap, &tf); - if (tf.command & (ATA_BUSY | ATA_DRQ)) { - rc = ahci_clo(ap); - - if (rc == -EOPNOTSUPP) { - reason = "port busy but CLO unavailable"; - goto fail_restart; - } else if (rc) { - reason = "port busy but CLO failed"; - goto fail_restart; - } - } - - /* restart engine */ - ahci_start_engine(port_mmio); - - ata_tf_init(ap->device, &tf); - fis = pp->cmd_tbl; - - /* issue the first D2H Register FIS */ - ahci_fill_cmd_slot(pp, 0, - cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY); - - tf.ctl |= ATA_SRST; - ata_tf_to_fis(&tf, fis, 0); - fis[1] &= ~(1 << 7); /* turn off Command FIS bit */ - - writel(1, port_mmio + PORT_CMD_ISSUE); + ahci_stop_engine(ap); - tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500); - if (tmp & 0x1) { - rc = -EIO; - reason = "1st FIS failed"; - goto fail; - } - - /* spec says at least 5us, but be generous and sleep for 1ms */ - msleep(1); + rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), + deadline, &online, NULL); - /* issue the second D2H Register FIS */ - ahci_fill_cmd_slot(pp, 0, cmd_fis_len); + hpriv->start_engine(ap); - tf.ctl &= ~ATA_SRST; - ata_tf_to_fis(&tf, fis, 0); - fis[1] &= ~(1 << 7); /* turn off Command FIS bit */ - - writel(1, port_mmio + PORT_CMD_ISSUE); - readl(port_mmio + PORT_CMD_ISSUE); /* flush */ + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); - /* 
spec mandates ">= 2ms" before checking status. - * We wait 150ms, because that was the magic delay used for - * ATAPI devices in Hale Landis's ATADRVR, for the period of time - * between when the ATA command register is written, and then - * status is checked. Because waiting for "a while" before - * checking status is fine, post SRST, we perform this magic - * delay here as well. + /* vt8251 doesn't clear BSY on signature FIS reception, + * request follow-up softreset. */ - msleep(150); - - *class = ATA_DEV_NONE; - if (ata_port_online(ap)) { - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { - rc = -EIO; - reason = "device not ready"; - goto fail; - } - *class = ahci_dev_classify(ap); - } - - DPRINTK("EXIT, class=%u\n", *class); - return 0; - - fail_restart: - ahci_start_engine(port_mmio); - fail: - ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); - return rc; + return online ? -EAGAIN : rc; } -static int ahci_hardreset(struct ata_port *ap, unsigned int *class) +static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) { + struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); + bool online; int rc; - DPRINTK("ENTER\n"); - - ahci_stop_engine(port_mmio); + ahci_stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(ap->device, &tf); - tf.command = 0xff; - ata_tf_to_fis(&tf, d2h_fis, 0); - - rc = sata_std_hardreset(ap, class); - - ahci_start_engine(port_mmio); - - if (rc == 0 && ata_port_online(ap)) - *class = ahci_dev_classify(ap); - if (*class == ATA_DEV_UNKNOWN) - *class = ATA_DEV_NONE; - - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), + deadline, &online, NULL); + + hpriv->start_engine(ap); + + /* The pseudo configuration device on SIMG4726 attached to + * ASUS P5W-DH Deluxe doesn't send signature FIS after + * hardreset if no device is attached to the first downstream + * port && the pseudo device locks up on SRST w/ PMP==0. To + * work around this, wait for !BSY only briefly. If BSY isn't + * cleared, perform CLO and proceed to IDENTIFY (achieved by + * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). + * + * Wait for two seconds. Devices attached to downstream port + * which can't process the following IDENTIFY after this will + * have to be reset again. For most cases, this should + * suffice while making probing snappish enough. 
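+	 *
+	 * (The CLO mentioned above is issued by ahci_kick_engine() in
+	 * the failure branch below.)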
+ */ + if (online) { + rc = ata_wait_after_reset(link, jiffies + 2 * HZ, + ahci_check_ready); + if (rc) + ahci_kick_engine(ap); + } return rc; } -static void ahci_postreset(struct ata_port *ap, unsigned int *class) +#ifdef CONFIG_PM +static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { - void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; - u32 new_tmp, tmp; - - ata_std_postreset(ap, class); + struct ata_host *host = pci_get_drvdata(pdev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 ctl; - /* Make sure port's ATAPI bit is set appropriately */ - new_tmp = tmp = readl(port_mmio + PORT_CMD); - if (*class == ATA_DEV_ATAPI) - new_tmp |= PORT_CMD_ATAPI; - else - new_tmp &= ~PORT_CMD_ATAPI; - if (new_tmp != tmp) { - writel(new_tmp, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* flush */ + if (mesg.event & PM_EVENT_SUSPEND && + hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { + dev_err(&pdev->dev, + "BIOS update required for suspend/resume\n"); + return -EIO; } -} - -static u8 ahci_check_status(struct ata_port *ap) -{ - void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; - - return readl(mmio + PORT_TFDATA) & 0xFF; -} -static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) -{ - struct ahci_port_priv *pp = ap->private_data; - u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + if (mesg.event & PM_EVENT_SLEEP) { + /* AHCI spec rev1.1 section 8.3.3: + * Software must disable interrupts prior to requesting a + * transition of the HBA to D3 state. + */ + ctl = readl(mmio + HOST_CTL); + ctl &= ~HOST_IRQ_EN; + writel(ctl, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + } - ata_tf_from_fis(d2h_fis, tf); + return ata_pci_device_suspend(pdev, mesg); } -static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) +static int ahci_pci_device_resume(struct pci_dev *pdev) { - struct scatterlist *sg; - struct ahci_sg *ahci_sg; - unsigned int n_sg = 0; + struct ata_host *host = pci_get_drvdata(pdev); + int rc; - VPRINTK("ENTER\n"); + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; - /* - * Next, the S/G list. - */ - ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; - ata_for_each_sg(sg, qc) { - dma_addr_t addr = sg_dma_address(sg); - u32 sg_len = sg_dma_len(sg); + /* Apple BIOS helpfully mangles the registers on resume */ + if (is_mcp89_apple(pdev)) + ahci_mcp89_apple_enable(pdev); - ahci_sg->addr = cpu_to_le32(addr & 0xffffffff); - ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); - ahci_sg->flags_size = cpu_to_le32(sg_len - 1); + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { + rc = ahci_pci_reset_controller(host); + if (rc) + return rc; - ahci_sg++; - n_sg++; + ahci_pci_init_controller(host); } - return n_sg; + ata_host_resume(host); + + return 0; } +#endif -static void ahci_qc_prep(struct ata_queued_cmd *qc) +static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) { - struct ata_port *ap = qc->ap; - struct ahci_port_priv *pp = ap->private_data; - int is_atapi = is_atapi_taskfile(&qc->tf); - void *cmd_tbl; - u32 opts; - const u32 cmd_fis_len = 5; /* five dwords */ - unsigned int n_elem; + int rc; /* - * Fill in command table information. First, the header, - * a SATA Register - Host to Device command FIS. + * If the device fixup already set the dma_mask to some non-standard + * value, don't extend it here. This happens on STA2X11, for example. 
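+	 *
+	 * (Later kernels collapse the 64-bit-then-32-bit fallback below
+	 * into a single dma_set_mask_and_coherent() call.)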
*/ - cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32)) + return 0; - ata_tf_to_fis(&qc->tf, cmd_tbl, 0); - if (is_atapi) { - memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); - memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); + if (using_dac && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } } - - n_elem = 0; - if (qc->flags & ATA_QCFLAG_DMAMAP) - n_elem = ahci_fill_sg(qc, cmd_tbl); - - /* - * Fill in command slot information. - */ - opts = cmd_fis_len | n_elem << 16; - if (qc->tf.flags & ATA_TFLAG_WRITE) - opts |= AHCI_CMD_WRITE; - if (is_atapi) - opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; - - ahci_fill_cmd_slot(pp, qc->tag, opts); + return 0; } -static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) +static void ahci_pci_print_info(struct ata_host *host) { - struct ahci_port_priv *pp = ap->private_data; - struct ata_eh_info *ehi = &ap->eh_info; - unsigned int err_mask = 0, action = 0; - struct ata_queued_cmd *qc; - u32 serror; - - ata_ehi_clear_desc(ehi); - - /* AHCI needs SError cleared; otherwise, it might lock up */ - serror = ahci_scr_read(ap, SCR_ERROR); - ahci_scr_write(ap, SCR_ERROR, serror); - - /* analyze @irq_stat */ - ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); - - if (irq_stat & PORT_IRQ_TF_ERR) - err_mask |= AC_ERR_DEV; - - if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { - err_mask |= AC_ERR_HOST_BUS; - action |= ATA_EH_SOFTRESET; - } - - if (irq_stat & PORT_IRQ_IF_ERR) { - err_mask |= AC_ERR_ATA_BUS; - action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", interface fatal error"); - } - - if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { - ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ? - "connection status changed" : "PHY RDY changed"); - } - - if (irq_stat & PORT_IRQ_UNK_FIS) { - u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); - - err_mask |= AC_ERR_HSM; - action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x", - unk[0], unk[1], unk[2], unk[3]); - } - - /* okay, let's hand over to EH */ - ehi->serror |= serror; - ehi->action |= action; + struct pci_dev *pdev = to_pci_dev(host->dev); + u16 cc; + const char *scc_s; - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc) - qc->err_mask |= err_mask; + pci_read_config_word(pdev, 0x0a, &cc); + if (cc == PCI_CLASS_STORAGE_IDE) + scc_s = "IDE"; + else if (cc == PCI_CLASS_STORAGE_SATA) + scc_s = "SATA"; + else if (cc == PCI_CLASS_STORAGE_RAID) + scc_s = "RAID"; else - ehi->err_mask |= err_mask; + scc_s = "unknown"; - if (irq_stat & PORT_IRQ_FREEZE) - ata_port_freeze(ap); - else - ata_port_abort(ap); + ahci_print_info(host, scc_s); } -static void ahci_host_intr(struct ata_port *ap) +/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is + * hardwired to on-board SIMG 4726. 
The chipset is ICH8 and doesn't + * support PMP and the 4726 either directly exports the device + * attached to the first downstream port or acts as a hardware storage + * controller and emulate a single ATA device (can be RAID 0/1 or some + * other configuration). + * + * When there's no device attached to the first downstream port of the + * 4726, "Config Disk" appears, which is a pseudo ATA device to + * configure the 4726. However, ATA emulation of the device is very + * lame. It doesn't send signature D2H Reg FIS after the initial + * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues. + * + * The following function works around the problem by always using + * hardreset on the port and not depending on receiving signature FIS + * afterward. If signature FIS isn't received soon, ATA class is + * assumed without follow-up softreset. + */ +static void ahci_p5wdh_workaround(struct ata_host *host) { - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - struct ata_eh_info *ehi = &ap->eh_info; - u32 status, qc_active; - int rc; - - status = readl(port_mmio + PORT_IRQ_STAT); - writel(status, port_mmio + PORT_IRQ_STAT); - - if (unlikely(status & PORT_IRQ_ERROR)) { - ahci_error_intr(ap, status); - return; + static struct dmi_system_id sysids[] = { + { + .ident = "P5W DH Deluxe", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "ASUSTEK COMPUTER INC"), + DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), + }, + }, + { } + }; + struct pci_dev *pdev = to_pci_dev(host->dev); + + if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && + dmi_check_system(sysids)) { + struct ata_port *ap = host->ports[1]; + + dev_info(&pdev->dev, + "enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n"); + + ap->ops = &ahci_p5wdh_ops; + ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; } - - if (ap->sactive) - qc_active = readl(port_mmio + PORT_SCR_ACT); - else - qc_active = readl(port_mmio + PORT_CMD_ISSUE); - - rc = ata_qc_complete_multiple(ap, qc_active, NULL); - if (rc > 0) - return; - if (rc < 0) { - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_SOFTRESET; - ata_port_freeze(ap); - return; - } - - /* hmmm... a spurious interupt */ - - /* some devices send D2H reg with I bit set during NCQ command phase */ - if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS) - return; - - /* ignore interim PIO setup fis interrupts */ - if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) - return; - - if (ata_ratelimit()) - ata_port_printk(ap, KERN_INFO, "spurious interrupt " - "(irq_stat 0x%x active_tag %d sactive 0x%x)\n", - status, ap->active_tag, ap->sactive); } -static void ahci_irq_clear(struct ata_port *ap) -{ - /* TODO */ -} - -static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +/* + * Macbook7,1 firmware forcibly disables MCP89 AHCI and changes PCI ID when + * booting in BIOS compatibility mode. We restore the registers but not ID. + */ +static void ahci_mcp89_apple_enable(struct pci_dev *pdev) { - struct ata_host *host = dev_instance; - struct ahci_host_priv *hpriv; - unsigned int i, handled = 0; - void __iomem *mmio; - u32 irq_stat, irq_ack = 0; - - VPRINTK("ENTER\n"); + u32 val; - hpriv = host->private_data; - mmio = host->mmio_base; + printk(KERN_INFO "ahci: enabling MCP89 AHCI mode\n"); - /* sigh. 
0xffffffff is a valid return from h/w */ - irq_stat = readl(mmio + HOST_IRQ_STAT); - irq_stat &= hpriv->port_map; - if (!irq_stat) - return IRQ_NONE; + pci_read_config_dword(pdev, 0xf8, &val); + val |= 1 << 0x1b; + /* the following changes the device ID, but appears not to affect function */ + /* val = (val & ~0xf0000000) | 0x80000000; */ + pci_write_config_dword(pdev, 0xf8, val); - spin_lock(&host->lock); + pci_read_config_dword(pdev, 0x54c, &val); + val |= 1 << 0xc; + pci_write_config_dword(pdev, 0x54c, val); - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; + pci_read_config_dword(pdev, 0x4a4, &val); + val &= 0xff; + val |= 0x01060100; + pci_write_config_dword(pdev, 0x4a4, val); - if (!(irq_stat & (1 << i))) - continue; - - ap = host->ports[i]; - if (ap) { - ahci_host_intr(ap); - VPRINTK("port %u\n", i); - } else { - VPRINTK("port %u (no irq)\n", i); - if (ata_ratelimit()) - dev_printk(KERN_WARNING, host->dev, - "interrupt on disabled port %u\n", i); - } + pci_read_config_dword(pdev, 0x54c, &val); + val &= ~(1 << 0xc); + pci_write_config_dword(pdev, 0x54c, val); - irq_ack |= (1 << i); - } - - if (irq_ack) { - writel(irq_ack, mmio + HOST_IRQ_STAT); - handled = 1; - } - - spin_unlock(&host->lock); - - VPRINTK("EXIT\n"); - - return IRQ_RETVAL(handled); -} - -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; - - if (qc->tf.protocol == ATA_PROT_NCQ) - writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); - writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); - readl(port_mmio + PORT_CMD_ISSUE); /* flush */ - - return 0; + pci_read_config_dword(pdev, 0xf8, &val); + val &= ~(1 << 0x1b); + pci_write_config_dword(pdev, 0xf8, val); } -static void ahci_freeze(struct ata_port *ap) +static bool is_mcp89_apple(struct pci_dev *pdev) { - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - - /* turn IRQ off */ - writel(0, port_mmio + PORT_IRQ_MASK); + return pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && + pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + pdev->subsystem_device == 0xcb89; } -static void ahci_thaw(struct ata_port *ap) +/* only some SB600 ahci controllers can do 64bit DMA */ +static bool ahci_sb600_enable_64bit(struct pci_dev *pdev) { - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - u32 tmp; - - /* clear IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - writel(tmp, port_mmio + PORT_IRQ_STAT); - writel(1 << ap->id, mmio + HOST_IRQ_STAT); + static const struct dmi_system_id sysids[] = { + /* + * The oldest version known to be broken is 0901 and + * working is 1501 which was released on 2007-10-26. + * Enable 64bit DMA on 1501 and anything newer. + * + * Please read bko#9412 for more info. + */ + { + .ident = "ASUS M2A-VM", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), + }, + .driver_data = "20071026", /* yyyymmdd */ + }, + /* + * All BIOS versions for the MSI K9A2 Platinum (MS-7376) + * support 64bit DMA. + * + * BIOS versions earlier than 1.5 had the Manufacturer DMI + * fields as "MICRO-STAR INTERANTIONAL CO.,LTD". + * This spelling mistake was fixed in BIOS version 1.5, so + * 1.5 and later have the Manufacturer as + * "MICRO-STAR INTERNATIONAL CO.,LTD". + * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER". 
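+	 * (DMI matching is a substring test, so this shortened string
+	 * covers both the misspelled and the corrected vendor name.)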
+ * + * BIOS versions earlier than 1.9 had a Board Product Name + * DMI field of "MS-7376". This was changed to be + * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still + * match on DMI_BOARD_NAME of "MS-7376". + */ + { + .ident = "MSI K9A2 Platinum", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "MICRO-STAR INTER"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7376"), + }, + }, + /* + * All BIOS versions for the MSI K9AGM2 (MS-7327) support + * 64bit DMA. + * + * This board also had the typo mentioned above in the + * Manufacturer DMI field (fixed in BIOS version 1.5), so + * match on DMI_BOARD_VENDOR of "MICRO-STAR INTER" again. + */ + { + .ident = "MSI K9AGM2", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "MICRO-STAR INTER"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7327"), + }, + }, + /* + * All BIOS versions for the Asus M3A support 64bit DMA. + * (all release versions from 0301 to 1206 were tested) + */ + { + .ident = "ASUS M3A", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M3A"), + }, + }, + { } + }; + const struct dmi_system_id *match; + int year, month, date; + char buf[9]; + + match = dmi_first_match(sysids); + if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || + !match) + return false; + + if (!match->driver_data) + goto enable_64bit; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + if (strcmp(buf, match->driver_data) >= 0) + goto enable_64bit; + else { + dev_warn(&pdev->dev, + "%s: BIOS too old, forcing 32bit DMA, update BIOS\n", + match->ident); + return false; + } - /* turn IRQ back on */ - writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK); +enable_64bit: + dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident); + return true; } -static void ahci_error_handler(struct ata_port *ap) +static bool ahci_broken_system_poweroff(struct pci_dev *pdev) { - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - - if (!(ap->pflags & ATA_PFLAG_FROZEN)) { - /* restart engine */ - ahci_stop_engine(port_mmio); - ahci_start_engine(port_mmio); + static const struct dmi_system_id broken_systems[] = { + { + .ident = "HP Compaq nx6310", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x1FUL, + }, + { + .ident = "HP Compaq 6720s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x1FUL, + }, + + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(broken_systems); + + if (dmi) { + unsigned long slot = (unsigned long)dmi->driver_data; + /* apply the quirk only to on-board controllers */ + return slot == PCI_SLOT(pdev->devfn); } - /* perform recovery */ - ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset, - ahci_postreset); + return false; } -static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) +static bool ahci_broken_suspend(struct pci_dev *pdev) { - struct ata_port *ap = qc->ap; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - - if (qc->flags & ATA_QCFLAG_FAILED) - qc->err_mask |= AC_ERR_OTHER; - - if (qc->err_mask) { - /* make DMA engine forget about the failed command */ - ahci_stop_engine(port_mmio); - 
ahci_start_engine(port_mmio); - } + static const struct dmi_system_id sysids[] = { + /* + * On HP dv[4-6] and HDX18 with earlier BIOSen, link + * to the harddisk doesn't become online after + * resuming from STR. Warn and fail suspend. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=12276 + * + * Use dates instead of versions to match as HP is + * apparently recycling both product and version + * strings. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=15462 + */ + { + .ident = "dv4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv4 Notebook PC"), + }, + .driver_data = "20090105", /* F.30 */ + }, + { + .ident = "dv5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv5 Notebook PC"), + }, + .driver_data = "20090506", /* F.16 */ + }, + { + .ident = "dv6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP Pavilion dv6 Notebook PC"), + }, + .driver_data = "20090423", /* F.21 */ + }, + { + .ident = "HDX18", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, + "HP HDX18 Notebook PC"), + }, + .driver_data = "20090430", /* F.23 */ + }, + /* + * Acer eMachines G725 has the same problem. BIOS + * V1.03 is known to be broken. V3.04 is known to + * work. Between, there are V1.06, V2.06 and V3.03 + * that we don't have much idea about. For now, + * blacklist anything older than V3.04. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=15104 + */ + { + .ident = "G725", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), + }, + .driver_data = "20091216", /* V3.04 */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + int year, month, date; + char buf[9]; + + if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) + return false; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + return strcmp(buf, dmi->driver_data) < 0; } -static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) +static bool ahci_broken_online(struct pci_dev *pdev) { - struct ahci_host_priv *hpriv = ap->host->private_data; - struct ahci_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - const char *emsg = NULL; - int rc; - - rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg); - if (rc) { - ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); - ahci_init_port(port_mmio, hpriv->cap, - pp->cmd_slot_dma, pp->rx_fis_dma); - } - - return rc; +#define ENCODE_BUSDEVFN(bus, slot, func) \ + (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) + static const struct dmi_system_id sysids[] = { + /* + * There are several gigabyte boards which use + * SIMG5723s configured as hardware RAID. Certain + * 5723 firmware revisions shipped there keep the link + * online but fail to answer properly to SRST or + * IDENTIFY when no device is attached downstream + * causing libata to retry quite a few times leading + * to excessive detection delay. + * + * As these firmwares respond to the second reset try + * with invalid device signature, considering unknown + * sig as offline works around the problem acceptably. 
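+	 *
+	 * (driver_data packs the controller's bus/devfn via the
+	 * ENCODE_BUSDEVFN() helper below, so the quirk fires only for
+	 * the device at that exact PCI address.)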
+ */ + { + .ident = "EP45-DQ6", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), + }, + .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), + }, + { + .ident = "EP45-DS5", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), + }, + .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), + }, + { } /* terminate list */ + }; +#undef ENCODE_BUSDEVFN + const struct dmi_system_id *dmi = dmi_first_match(sysids); + unsigned int val; + + if (!dmi) + return false; + + val = (unsigned long)dmi->driver_data; + + return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); } -static int ahci_port_resume(struct ata_port *ap) +static bool ahci_broken_devslp(struct pci_dev *pdev) { - struct ahci_port_priv *pp = ap->private_data; - struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); + /* device with broken DEVSLP but still showing SDS capability */ + static const struct pci_device_id ids[] = { + { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */ + {} + }; - ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma); - - return 0; + return pci_match_id(ids, pdev); } -static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) +#ifdef CONFIG_ATA_ACPI +static void ahci_gtf_filter_workaround(struct ata_host *host) { - struct ata_host *host = dev_get_drvdata(&pdev->dev); - void __iomem *mmio = host->mmio_base; - u32 ctl; - - if (mesg.event == PM_EVENT_SUSPEND) { - /* AHCI spec rev1.1 section 8.3.3: - * Software must disable interrupts prior to requesting a - * transition of the HBA to D3 state. + static const struct dmi_system_id sysids[] = { + /* + * Aspire 3810T issues a bunch of SATA enable commands + * via _GTF including an invalid one and one which is + * rejected by the device. Among the successful ones + * is FPDMA non-zero offset enable which when enabled + * only on the drive side leads to NCQ command + * failures. Filter it out. 
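The ENCODE_BUSDEVFN() helper above packs a PCI location the same way the kernel's devfn macros do: bits 7:3 of devfn hold the slot, bits 2:0 the function, and the bus number sits in bits 15:8. A standalone round-trip check (the macro bodies are copied here so the sketch compiles on its own):

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	/* EP45-DQ6 entry: bus 0x0a, slot 0x00, function 0 */
	unsigned long val = (0x0a << 8) | PCI_DEVFN(0x00, 0);

	printf("bus %02lx devfn %02lx slot %x func %x\n",
	       val >> 8, val & 0xff,
	       PCI_SLOT(val & 0xff), PCI_FUNC(val & 0xff));
	return 0;	/* prints: bus 0a devfn 00 slot 0 func 0 */
}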
 */
-	ctl = readl(mmio + HOST_CTL);
-	ctl &= ~HOST_IRQ_EN;
-	writel(ctl, mmio + HOST_CTL);
-	readl(mmio + HOST_CTL); /* flush */
-	}
-
-	return ata_pci_device_suspend(pdev, mesg);
-}
-
-static int ahci_pci_device_resume(struct pci_dev *pdev)
-{
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
-	struct ahci_host_priv *hpriv = host->private_data;
-	void __iomem *mmio = host->mmio_base;
-	int rc;
+	{
+		.ident = "Aspire 3810T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
+		},
+		.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
+	},
+	{ }
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	unsigned int filter;
+	int i;
+
+	if (!dmi)
+		return;
 
-	ata_pci_device_do_resume(pdev);
+	filter = (unsigned long)dmi->driver_data;
+	dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
+		 filter, dmi->ident);
 
-	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = ahci_reset_controller(mmio, pdev);
-		if (rc)
-			return rc;
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_link *link;
+		struct ata_device *dev;
 
-		ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
+		ata_for_each_link(link, ap, EDGE)
+			ata_for_each_dev(dev, link, ALL)
+				dev->gtf_filter |= filter;
 	}
-
-	ata_host_resume(host);
-
-	return 0;
 }
+#else
+static inline void ahci_gtf_filter_workaround(struct ata_host *host)
+{}
+#endif
 
-static int ahci_port_start(struct ata_port *ap)
+static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
+				struct ahci_host_priv *hpriv)
 {
-	struct device *dev = ap->host->dev;
-	struct ahci_host_priv *hpriv = ap->host->private_data;
-	struct ahci_port_priv *pp;
-	void __iomem *mmio = ap->host->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	void *mem;
-	dma_addr_t mem_dma;
-	int rc;
-
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
-	if (!pp)
-		return -ENOMEM;
-	memset(pp, 0, sizeof(*pp));
-
-	rc = ata_pad_alloc(ap, dev);
-	if (rc) {
-		kfree(pp);
-		return rc;
-	}
+	int rc, nvec;
 
-	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
-	if (!mem) {
-		ata_pad_free(ap, dev);
-		kfree(pp);
-		return -ENOMEM;
-	}
-	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
-
-	/*
-	 * First item in chunk of DMA memory: 32-slot command table,
-	 * 32 bytes each in size
-	 */
-	pp->cmd_slot = mem;
-	pp->cmd_slot_dma = mem_dma;
+	if (hpriv->flags & AHCI_HFLAG_NO_MSI)
+		goto intx;
 
-	mem += AHCI_CMD_SLOT_SZ;
-	mem_dma += AHCI_CMD_SLOT_SZ;
+	nvec = pci_msi_vec_count(pdev);
+	if (nvec < 0)
+		goto intx;
 
 	/*
-	 * Second item: Received-FIS area
+	 * If the number of MSIs is less than the number of ports then Sharing
+	 * Last Message mode could be enforced. In this case assume that the
+	 * advantage of multiple MSIs is negated and use single MSI mode instead.
*/ - pp->rx_fis = mem; - pp->rx_fis_dma = mem_dma; + if (nvec < n_ports) + goto single_msi; - mem += AHCI_RX_FIS_SZ; - mem_dma += AHCI_RX_FIS_SZ; + rc = pci_enable_msi_exact(pdev, nvec); + if (rc == -ENOSPC) + goto single_msi; + else if (rc < 0) + goto intx; - /* - * Third item: data area for storing a single command - * and its scatter-gather table - */ - pp->cmd_tbl = mem; - pp->cmd_tbl_dma = mem_dma; + /* fallback to single MSI mode if the controller enforced MRSM mode */ + if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) { + pci_disable_msi(pdev); + printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); + goto single_msi; + } - ap->private_data = pp; + return nvec; - /* initialize port */ - ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma); +single_msi: + if (pci_enable_msi(pdev)) + goto intx; + return 1; +intx: + pci_intx(pdev, 1); return 0; } -static void ahci_port_stop(struct ata_port *ap) -{ - struct device *dev = ap->host->dev; - struct ahci_host_priv *hpriv = ap->host->private_data; - struct ahci_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->mmio_base; - void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); - const char *emsg = NULL; - int rc; - - /* de-initialize port */ - rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg); - if (rc) - ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); - - ap->private_data = NULL; - dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, - pp->cmd_slot, pp->cmd_slot_dma); - ata_pad_free(ap, dev); - kfree(pp); -} - -static void ahci_setup_port(struct ata_ioports *port, unsigned long base, - unsigned int port_idx) +/** + * ahci_host_activate - start AHCI host, request IRQs and register it + * @host: target ATA host + * @irq: base IRQ number to request + * @n_msis: number of MSIs allocated for this host + * @irq_handler: irq_handler used when requesting IRQs + * @irq_flags: irq_flags used when requesting IRQs + * + * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1 + * when multiple MSIs were allocated. That is one MSI per port, starting + * from @irq. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
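For context, the interrupt setup above is consumed by the probe path roughly as sketched below; this fragment only restates logic that appears later in this patch and is not an additional API:

/*
 * Illustrative fragment only -- mirrors the tail of ahci_init_one()
 * shown further down in this patch.
 */
static int example_activate(struct ata_host *host, struct pci_dev *pdev,
			    struct ahci_host_priv *hpriv, int n_msis)
{
	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
		/* one vector per port, pdev->irq is the base vector */
		return ahci_host_activate(host, pdev->irq, n_msis);

	/* single MSI or INTx: one shared handler for the whole host */
	return ata_host_activate(host, pdev->irq, ahci_interrupt,
				 IRQF_SHARED, &ahci_sht);
}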
+ */ +int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis) { - VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx); - base = ahci_port_base_ul(base, port_idx); - VPRINTK("base now==0x%lx\n", base); - - port->cmd_addr = base; - port->scr_addr = base + PORT_SCR; - - VPRINTK("EXIT\n"); -} + int i, rc; -static int ahci_host_init(struct ata_probe_ent *probe_ent) -{ - struct ahci_host_priv *hpriv = probe_ent->private_data; - struct pci_dev *pdev = to_pci_dev(probe_ent->dev); - void __iomem *mmio = probe_ent->mmio_base; - unsigned int i, using_dac; - int rc; + /* Sharing Last Message among several ports is not supported */ + if (n_msis < host->n_ports) + return -EINVAL; - rc = ahci_reset_controller(mmio, pdev); + rc = ata_host_start(host); if (rc) return rc; - hpriv->cap = readl(mmio + HOST_CAP); - hpriv->port_map = readl(mmio + HOST_PORTS_IMPL); - probe_ent->n_ports = (hpriv->cap & 0x1f) + 1; - - VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n", - hpriv->cap, hpriv->port_map, probe_ent->n_ports); + for (i = 0; i < host->n_ports; i++) { + struct ahci_port_priv *pp = host->ports[i]->private_data; - using_dac = hpriv->cap & HOST_CAP_64; - if (using_dac && - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); - if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - return rc; - } - } - } else { - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - return rc; - } - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - return rc; + /* Do not receive interrupts sent by dummy ports */ + if (!pp) { + disable_irq(irq + i); + continue; } - } - for (i = 0; i < probe_ent->n_ports; i++) - ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i); + rc = devm_request_threaded_irq(host->dev, irq + i, + ahci_hw_interrupt, + ahci_thread_fn, IRQF_SHARED, + pp->irq_desc, host->ports[i]); + if (rc) + goto out_free_irqs; + } - ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap); + for (i = 0; i < host->n_ports; i++) + ata_port_desc(host->ports[i], "irq %d", irq + i); - pci_set_master(pdev); + rc = ata_host_register(host, &ahci_sht); + if (rc) + goto out_free_all_irqs; return 0; -} -static void ahci_print_info(struct ata_probe_ent *probe_ent) -{ - struct ahci_host_priv *hpriv = probe_ent->private_data; - struct pci_dev *pdev = to_pci_dev(probe_ent->dev); - void __iomem *mmio = probe_ent->mmio_base; - u32 vers, cap, impl, speed; - const char *speed_s; - u16 cc; - const char *scc_s; - - vers = readl(mmio + HOST_VERSION); - cap = hpriv->cap; - impl = hpriv->port_map; - - speed = (cap >> 20) & 0xf; - if (speed == 1) - speed_s = "1.5"; - else if (speed == 2) - speed_s = "3"; - else - speed_s = "?"; - - pci_read_config_word(pdev, 0x0a, &cc); - if (cc == 0x0101) - scc_s = "IDE"; - else if (cc == 0x0106) - scc_s = "SATA"; - else if (cc == 0x0104) - scc_s = "RAID"; - else - scc_s = "unknown"; +out_free_all_irqs: + i = host->n_ports; +out_free_irqs: + for (i--; i >= 0; i--) + devm_free_irq(host->dev, irq + i, host->ports[i]); - dev_printk(KERN_INFO, &pdev->dev, - "AHCI %02x%02x.%02x%02x " - "%u slots %u ports %s Gbps 0x%x impl %s mode\n" - , - - (vers >> 24) & 0xff, - (vers >> 16) & 0xff, - (vers >> 8) & 0xff, - vers & 0xff, - - ((cap >> 8) & 0x1f) + 1, - (cap & 0x1f) + 1, - 
speed_s, - impl, - scc_s); - - dev_printk(KERN_INFO, &pdev->dev, - "flags: " - "%s%s%s%s%s%s" - "%s%s%s%s%s%s%s\n" - , - - cap & (1 << 31) ? "64bit " : "", - cap & (1 << 30) ? "ncq " : "", - cap & (1 << 28) ? "ilck " : "", - cap & (1 << 27) ? "stag " : "", - cap & (1 << 26) ? "pm " : "", - cap & (1 << 25) ? "led " : "", - - cap & (1 << 24) ? "clo " : "", - cap & (1 << 19) ? "nz " : "", - cap & (1 << 18) ? "only " : "", - cap & (1 << 17) ? "pmp " : "", - cap & (1 << 15) ? "pio " : "", - cap & (1 << 14) ? "slum " : "", - cap & (1 << 13) ? "part " : "" - ); + return rc; } -static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; + unsigned int board_id = ent->driver_data; + struct ata_port_info pi = ahci_port_info[board_id]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; - unsigned long base; - void __iomem *mmio_base; - unsigned int board_idx = (unsigned int) ent->driver_data; - int have_msi, pci_dev_busy = 0; - int rc; + struct ata_host *host; + int n_ports, n_msis, i, rc; + int ahci_pci_bar = AHCI_PCI_BAR_STANDARD; VPRINTK("ENTER\n"); - WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); + WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + /* The AHCI driver can only drive the SATA ports, the PATA driver + can drive them all so if both drivers are selected make sure + AHCI stays out of the way */ + if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) + return -ENODEV; + + /* Apple BIOS on MCP89 prevents us using AHCI */ + if (is_mcp89_apple(pdev)) + ahci_mcp89_apple_enable(pdev); - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. + * At the moment, we can only use the AHCI mode. Let the users know + * that for SAS drives they're out of luck. + */ + if (pdev->vendor == PCI_VENDOR_ID_PROMISE) + dev_info(&pdev->dev, + "PDC42819 can only drive SATA devices with this driver\n"); + + /* Both Connext and Enmotus devices use non-standard BARs */ + if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) + ahci_pci_bar = AHCI_PCI_BAR_STA2X11; + else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) + ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; + + /* acquire resources */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == 0x2652 || pdev->device == 0x2653)) { + u8 map; - /* JMicron-specific fixup: make sure we're in AHCI mode */ - /* This is protected from races with ata_jmicron by the pci probe - locking */ - if (pdev->vendor == PCI_VENDOR_ID_JMICRON) { - /* AHCI enable, AHCI on function 0 */ - pci_write_config_byte(pdev, 0x41, 0xa1); - /* Function 1 is the PATA controller */ - if (PCI_FUNC(pdev->devfn)) + /* ICH6s share the same PCI ID for both piix and ahci + * modes. Enabling ahci mode while MAP indicates + * combined mode is a bad idea. Yield to ata_piix. + */ + pci_read_config_byte(pdev, ICH_MAP, &map); + if (map & 0x3) { + dev_info(&pdev->dev, + "controller is in combined mode, can't enable AHCI mode\n"); return -ENODEV; + } } - rc = pci_enable_device(pdev); + /* AHCI controllers often implement SFF compatible interface. + * Grab all PCI BARs just in case. 
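A usage note on the call that follows: pcim_iomap_regions_request_all() requests every BAR of the device but ioremaps only the ones named in the bitmask, which is why a single-bit mask is passed. A minimal sketch of the same pattern for a hypothetical driver whose registers live in BAR 5:

/* Hypothetical probe fragment: request all regions, map only BAR 5. */
static int example_map_bar(struct pci_dev *pdev)
{
	void __iomem *mmio;
	int rc;

	rc = pcim_iomap_regions_request_all(pdev, 1 << 5, "example");
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep the device enabled on unbind */
	if (rc)
		return rc;

	mmio = pcim_iomap_table(pdev)[5];	/* NULL for unmapped BARs */
	return mmio ? 0 : -ENOMEM;
}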
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	/* MCP65 revision A1 and A2 can't do MSI */
+	if (board_id == board_ahci_mcp65 &&
+	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
+		hpriv->flags |= AHCI_HFLAG_NO_MSI;
+
+	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
+	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
+		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+
+	/* only some SB600s can do 64bit DMA */
+	if (ahci_sb600_enable_64bit(pdev))
+		hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
+
+	hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
+	/* must set the flag prior to saving the config for it to take effect */
+	if (ahci_broken_devslp(pdev))
+		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
+	/* save initial config */
+	ahci_pci_save_initial_config(pdev, hpriv);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ) {
+		pi.flags |= ATA_FLAG_NCQ;
+		/*
+		 * Auto-activate optimization is supposed to be
+		 * supported on all AHCI controllers indicating NCQ
+		 * capability, but it seems to be broken on some
+		 * chipsets including NVIDIAs.
+		 */
+		if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
+			pi.flags |= ATA_FLAG_FPDMA_AA;
+
+		/*
+		 * All AHCI controllers should be forward-compatible
+		 * with the new auxiliary field. This code should be
+		 * conditionalized if any buggy AHCI controllers are
+		 * encountered.
+		 */
+		pi.flags |= ATA_FLAG_FPDMA_AUX;
 	}
 
-	if (pci_enable_msi(pdev) == 0)
-		have_msi = 1;
-	else {
-		pci_intx(pdev, 1);
-		have_msi = 0;
-	}
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
 
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_msi;
-	}
+	ahci_set_em_messages(hpriv, &pi);
 
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
+	if (ahci_broken_system_poweroff(pdev)) {
+		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
+		dev_info(&pdev->dev,
+			"quirky BIOS, skipping spindown on poweroff\n");
+	}
 
-	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
+	if (ahci_broken_suspend(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for suspend/resume\n");
 	}
-	base = (unsigned long) mmio_base;
 
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
+	if (ahci_broken_online(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
+		dev_info(&pdev->dev,
+			 "online status unreliable, applying workaround\n");
 	}
-	memset(hpriv, 0, sizeof(*hpriv));
 
-	probe_ent->sht		= ahci_port_info[board_idx].sht;
-	probe_ent->port_flags	= ahci_port_info[board_idx].flags;
-	probe_ent->pio_mask	= ahci_port_info[board_idx].pio_mask;
-	probe_ent->udma_mask	= ahci_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= ahci_port_info[board_idx].port_ops;
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+ */ + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - probe_ent->private_data = hpriv; + n_msis = ahci_init_interrupts(pdev, n_ports, hpriv); + if (n_msis > 1) + hpriv->flags |= AHCI_HFLAG_MULTI_MSI; - if (have_msi) - hpriv->flags |= AHCI_FLAG_MSI; + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) + return -ENOMEM; + host->private_data = hpriv; - /* initialize adapter */ - rc = ahci_host_init(probe_ent); - if (rc) - goto err_out_hpriv; + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) + host->flags |= ATA_HOST_PARALLEL_SCAN; + else + dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n"); - if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) && - (hpriv->cap & HOST_CAP_NCQ)) - probe_ent->port_flags |= ATA_FLAG_NCQ; + if (pi.flags & ATA_FLAG_EM) + ahci_reset_em(host); - ahci_print_info(probe_ent); + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); + ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar"); + ata_port_pbar_desc(ap, ahci_pci_bar, + 0x100 + ap->port_no * 0x80, "port"); - return 0; + /* set enclosure management message type */ + if (ap->flags & ATA_FLAG_EM) + ap->em_message_type = hpriv->em_msg_type; -err_out_hpriv: - kfree(hpriv); -err_out_iounmap: - pci_iounmap(pdev, mmio_base); -err_out_free_ent: - kfree(probe_ent); -err_out_msi: - if (have_msi) - pci_disable_msi(pdev); - else - pci_intx(pdev, 0); - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; -} -static void ahci_remove_one (struct pci_dev *pdev) -{ - struct device *dev = pci_dev_to_dev(pdev); - struct ata_host *host = dev_get_drvdata(dev); - struct ahci_host_priv *hpriv = host->private_data; - unsigned int i; - int have_msi; + /* disabled/not-implemented port */ + if (!(hpriv->port_map & (1 << i))) + ap->ops = &ata_dummy_port_ops; + } - for (i = 0; i < host->n_ports; i++) - ata_port_detach(host->ports[i]); + /* apply workaround for ASUS P5W DH Deluxe mainboard */ + ahci_p5wdh_workaround(host); - have_msi = hpriv->flags & AHCI_FLAG_MSI; - free_irq(host->irq, host); + /* apply gtf filter quirk */ + ahci_gtf_filter_workaround(host); - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; + /* initialize adapter */ + rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); + if (rc) + return rc; - ata_scsi_release(ap->scsi_host); - scsi_host_put(ap->scsi_host); - } + rc = ahci_pci_reset_controller(host); + if (rc) + return rc; - kfree(hpriv); - pci_iounmap(pdev, host->mmio_base); - kfree(host); + ahci_pci_init_controller(host); + ahci_pci_print_info(host); - if (have_msi) - pci_disable_msi(pdev); - else - pci_intx(pdev, 0); - pci_release_regions(pdev); - pci_disable_device(pdev); - dev_set_drvdata(dev, NULL); -} + pci_set_master(pdev); -static int __init ahci_init(void) -{ - return pci_register_driver(&ahci_pci_driver); -} + if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) + return ahci_host_activate(host, pdev->irq, n_msis); -static void __exit ahci_exit(void) -{ - pci_unregister_driver(&ahci_pci_driver); + return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, + &ahci_sht); } +module_pci_driver(ahci_pci_driver); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("AHCI SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 
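One detail of the probe path above that is easy to misread is the port-count computation: n_ports is the maximum of CAP.NP + 1 and fls(port_map), since either field alone can understate the count. A standalone worked example with invented register values (fls() is open-coded so this compiles outside the kernel):

#include <stdio.h>

static unsigned int my_fls(unsigned int x)
{
	unsigned int r = 0;

	while (x) {	/* index of the highest set bit, 1-based */
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int cap_np = 3;	/* CAP.NP = 3 -> 4 ports implemented */
	unsigned int port_map = 0x31;	/* ports 0, 4 and 5 enabled */
	unsigned int by_cap = cap_np + 1;		/* 4 */
	unsigned int by_map = my_fls(port_map);		/* 6 */

	printf("n_ports = %u\n", by_cap > by_map ? by_cap : by_map); /* 6 */
	return 0;
}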
MODULE_VERSION(DRV_VERSION); - -module_init(ahci_init); -module_exit(ahci_exit); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h new file mode 100644 index 00000000000..5513296e5e2 --- /dev/null +++ b/drivers/ata/ahci.h @@ -0,0 +1,410 @@ +/* + * ahci.h - Common AHCI SATA definitions and declarations + * + * Maintained by: Tejun Heo <tj@kernel.org> + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2004-2005 Red Hat, Inc. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +#ifndef _AHCI_H +#define _AHCI_H + +#include <linux/clk.h> +#include <linux/libata.h> +#include <linux/phy/phy.h> +#include <linux/regulator/consumer.h> + +/* Enclosure Management Control */ +#define EM_CTRL_MSG_TYPE 0x000f0000 + +/* Enclosure Management LED Message Type */ +#define EM_MSG_LED_HBA_PORT 0x0000000f +#define EM_MSG_LED_PMP_SLOT 0x0000ff00 +#define EM_MSG_LED_VALUE 0xffff0000 +#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 +#define EM_MSG_LED_VALUE_OFF 0xfff80000 +#define EM_MSG_LED_VALUE_ON 0x00010000 + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_CLKS = 3, + AHCI_MAX_SG = 168, /* hardware max is 64K */ + AHCI_DMA_BOUNDARY = 0xffffffff, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 0x40, + AHCI_CMD_TBL_HDR_SZ = 0x80, + AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), + AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, + AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + + AHCI_RX_FIS_SZ, + AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + + AHCI_CMD_TBL_AR_SZ + + (AHCI_RX_FIS_SZ * 16), + AHCI_IRQ_ON_SG = (1 << 31), + AHCI_CMD_ATAPI = (1 << 5), + AHCI_CMD_WRITE = (1 << 6), + AHCI_CMD_PREFETCH = (1 << 7), + AHCI_CMD_RESET = (1 << 8), + AHCI_CMD_CLR_BUSY = (1 << 10), + + RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ + RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ + RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ + RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ + + /* global controller registers */ + HOST_CAP = 0x00, /* host capabilities */ + HOST_CTL = 0x04, /* global host control */ + HOST_IRQ_STAT = 0x08, /* interrupt status */ + HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ + HOST_VERSION = 0x10, /* AHCI spec. 
version compliancy */ + HOST_EM_LOC = 0x1c, /* Enclosure Management location */ + HOST_EM_CTL = 0x20, /* Enclosure Management Control */ + HOST_CAP2 = 0x24, /* host capabilities, extended */ + + /* HOST_CTL bits */ + HOST_RESET = (1 << 0), /* reset controller; self-clear */ + HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ + HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ + HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ + + /* HOST_CAP bits */ + HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ + HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ + HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ + HOST_CAP_PART = (1 << 13), /* Partial state capable */ + HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ + HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ + HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ + HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ + HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ + HOST_CAP_CLO = (1 << 24), /* Command List Override support */ + HOST_CAP_LED = (1 << 25), /* Supports activity LED */ + HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ + HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ + HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ + HOST_CAP_SNTF = (1 << 29), /* SNotification register */ + HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ + HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ + + /* HOST_CAP2 bits */ + HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ + HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ + HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ + HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ + HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ + HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ + + /* registers for each SATA port */ + PORT_LST_ADDR = 0x00, /* command list DMA addr */ + PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ + PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ + PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ + PORT_IRQ_STAT = 0x10, /* interrupt status */ + PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ + PORT_CMD = 0x18, /* port command */ + PORT_TFDATA = 0x20, /* taskfile data */ + PORT_SIG = 0x24, /* device TF signature */ + PORT_CMD_ISSUE = 0x38, /* command issue */ + PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ + PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ + PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ + PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ + PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ + PORT_FBS = 0x40, /* FIS-based Switching */ + PORT_DEVSLP = 0x44, /* device sleep */ + + /* PORT_IRQ_{STAT,MASK} bits */ + PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ + PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ + PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ + PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ + PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ + PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ + PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ + PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ + + PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ + PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ + PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ + PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ + PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS 
rx'd */ + PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ + PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ + PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ + PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ + + PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | + PORT_IRQ_IF_ERR | + PORT_IRQ_CONNECT | + PORT_IRQ_PHYRDY | + PORT_IRQ_UNK_FIS | + PORT_IRQ_BAD_PMP, + PORT_IRQ_ERROR = PORT_IRQ_FREEZE | + PORT_IRQ_TF_ERR | + PORT_IRQ_HBUS_DATA_ERR, + DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, + + /* PORT_CMD bits */ + PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ + PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ + PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ + PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ + PORT_CMD_PMP = (1 << 17), /* PMP attached */ + PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ + PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ + PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ + PORT_CMD_CLO = (1 << 3), /* Command list override */ + PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ + PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ + PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ + + PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ + PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ + PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ + PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ + + /* PORT_FBS bits */ + PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ + PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ + PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ + PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ + PORT_FBS_SDE = (1 << 2), /* FBS single device error */ + PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ + PORT_FBS_EN = (1 << 0), /* Enable FBS */ + + /* PORT_DEVSLP bits */ + PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ + PORT_DEVSLP_DM_MASK = (0xf << 25), /* DITO multiplier mask */ + PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ + PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ + PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ + PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ + PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ + + /* hpriv->flags bits */ + +#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) + + AHCI_HFLAG_NO_NCQ = (1 << 0), + AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ + AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ + AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ + AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ + AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ + AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ + AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as + link offline */ + AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ + AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ + AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ + AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on + port start (wait until + error-handling stage) */ + AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ + AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no 
device sleep */ + AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ + + /* ap->flags bits */ + + AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | + ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, + + ICH_MAP = 0x90, /* ICH MAP register */ + + /* em constants */ + EM_MAX_SLOTS = 8, + EM_MAX_RETRY = 5, + + /* em_ctl bits */ + EM_CTL_RST = (1 << 9), /* Reset */ + EM_CTL_TM = (1 << 8), /* Transmit Message */ + EM_CTL_MR = (1 << 0), /* Message Received */ + EM_CTL_ALHD = (1 << 26), /* Activity LED */ + EM_CTL_XMT = (1 << 25), /* Transmit Only */ + EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ + EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ + EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ + EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ + EM_CTL_LED = (1 << 16), /* LED messages supported */ + + /* em message type */ + EM_MSG_TYPE_LED = (1 << 0), /* LED */ + EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ + EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ + EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ +}; + +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; + +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + unsigned long saved_activity; + unsigned long activity; + unsigned long led_state; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + /* for NCQ spurious interrupt analysis */ + unsigned int ncq_saw_d2h:1; + unsigned int ncq_saw_dmas:1; + unsigned int ncq_saw_sdb:1; + u32 intr_status; /* interrupts to handle */ + spinlock_t lock; /* protects parent ata_port */ + u32 intr_mask; /* interrupts to enable */ + bool fbs_supported; /* set iff FBS is supported */ + bool fbs_enabled; /* set iff FBS is enabled */ + int fbs_last_dev; /* save FBS.DEV of last FIS */ + /* enclosure management info per PM slot */ + struct ahci_em_priv em_priv[EM_MAX_SLOTS]; + char *irq_desc; /* desc in /proc/interrupts */ +}; + +struct ahci_host_priv { + void __iomem * mmio; /* bus-independent mem map */ + unsigned int flags; /* AHCI_HFLAG_* */ + u32 cap; /* cap to use */ + u32 cap2; /* cap2 to use */ + u32 port_map; /* port map to use */ + u32 saved_cap; /* saved initial cap */ + u32 saved_cap2; /* saved initial cap2 */ + u32 saved_port_map; /* saved initial port_map */ + u32 em_loc; /* enclosure management location */ + u32 em_buf_sz; /* EM buffer size in byte */ + u32 em_msg_type; /* EM message type */ + bool got_runtime_pm; /* Did we do pm_runtime_get? */ + struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ + struct regulator *target_pwr; /* Optional */ + struct phy *phy; /* If platform uses phy */ + void *plat_data; /* Other platform data */ + /* + * Optional ahci_start_engine override, if not set this gets set to the + * default ahci_start_engine during ahci_save_initial_config, this can + * be overridden anytime before the host is activated. 
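Because ahci_host_priv exposes a start_engine hook (described in the comment above), a platform driver can interpose its own engine-start sequence. A hypothetical sketch; the quirk shown is invented:

/* Hypothetical platform quirk: poke vendor registers, then run the
 * stock engine start.  ahci_start_engine() is the default hook.
 */
static void example_start_engine(struct ata_port *ap)
{
	/* vendor-specific fixup (e.g. a PHY kick) would go here */

	ahci_start_engine(ap);
}

/* in probe, after ahci_save_initial_config() has run:
 *	hpriv->start_engine = example_start_engine;
 */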
+ */ + void (*start_engine)(struct ata_port *ap); +}; + +extern int ahci_ignore_sss; + +extern struct device_attribute *ahci_shost_attrs[]; +extern struct device_attribute *ahci_sdev_attrs[]; + +#define AHCI_SHT(drv_name) \ + ATA_NCQ_SHT(drv_name), \ + .can_queue = AHCI_MAX_CMDS - 1, \ + .sg_tablesize = AHCI_MAX_SG, \ + .dma_boundary = AHCI_DMA_BOUNDARY, \ + .shost_attrs = ahci_shost_attrs, \ + .sdev_attrs = ahci_sdev_attrs + +extern struct ata_port_operations ahci_ops; +extern struct ata_port_operations ahci_platform_ops; +extern struct ata_port_operations ahci_pmp_retry_srst_ops; + +unsigned int ahci_dev_classify(struct ata_port *ap); +void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts); +void ahci_save_initial_config(struct device *dev, + struct ahci_host_priv *hpriv, + unsigned int force_port_map, + unsigned int mask_port_map); +void ahci_init_controller(struct ata_host *host); +int ahci_reset_controller(struct ata_host *host); + +int ahci_do_softreset(struct ata_link *link, unsigned int *class, + int pmp, unsigned long deadline, + int (*check_ready)(struct ata_link *link)); + +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); +int ahci_stop_engine(struct ata_port *ap); +void ahci_start_fis_rx(struct ata_port *ap); +void ahci_start_engine(struct ata_port *ap); +int ahci_check_ready(struct ata_link *link); +int ahci_kick_engine(struct ata_port *ap); +int ahci_port_resume(struct ata_port *ap); +void ahci_set_em_messages(struct ahci_host_priv *hpriv, + struct ata_port_info *pi); +int ahci_reset_em(struct ata_host *host); +irqreturn_t ahci_interrupt(int irq, void *dev_instance); +irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance); +irqreturn_t ahci_thread_fn(int irq, void *dev_instance); +void ahci_print_info(struct ata_host *host, const char *scc_s); +int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis); +void ahci_error_handler(struct ata_port *ap); + +static inline void __iomem *__ahci_port_base(struct ata_host *host, + unsigned int port_no) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + + return mmio + 0x100 + (port_no * 0x80); +} + +static inline void __iomem *ahci_port_base(struct ata_port *ap) +{ + return __ahci_port_base(ap->host, ap->port_no); +} + +static inline int ahci_nr_ports(u32 cap) +{ + return (cap & 0x1f) + 1; +} + +#endif /* _AHCI_H */ diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c new file mode 100644 index 00000000000..2b77d53bccf --- /dev/null +++ b/drivers/ata/ahci_da850.c @@ -0,0 +1,115 @@ +/* + * DaVinci DA850 AHCI SATA platform driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/ahci_platform.h> +#include "ahci.h" + +/* SATA PHY Control Register offset from AHCI base */ +#define SATA_P0PHYCR_REG 0x178 + +#define SATA_PHY_MPY(x) ((x) << 0) +#define SATA_PHY_LOS(x) ((x) << 6) +#define SATA_PHY_RXCDR(x) ((x) << 10) +#define SATA_PHY_RXEQ(x) ((x) << 13) +#define SATA_PHY_TXSWING(x) ((x) << 19) +#define SATA_PHY_ENPLL(x) ((x) << 31) + +/* + * The multiplier needed for 1.5GHz PLL output. 
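The PHY control word assembled by da850_sata_init() below is a plain OR of shifted fields, so the final constant can be verified by hand. A standalone sketch (field values copied from the driver; the ENPLL macro gains an unsigned cast here only because shifting a signed 1 into bit 31 is undefined in portable C):

#include <stdio.h>

#define SATA_PHY_MPY(x)		((x) << 0)
#define SATA_PHY_LOS(x)		((x) << 6)
#define SATA_PHY_RXCDR(x)	((x) << 10)
#define SATA_PHY_RXEQ(x)	((x) << 13)
#define SATA_PHY_TXSWING(x)	((x) << 19)
#define SATA_PHY_ENPLL(x)	((unsigned int)(x) << 31)

int main(void)
{
	unsigned int val = SATA_PHY_MPY(7 + 1) | SATA_PHY_LOS(1) |
			   SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) |
			   SATA_PHY_TXSWING(3) | SATA_PHY_ENPLL(1);

	printf("P0PHYCR = 0x%08x\n", val);	/* 0x80183048 */
	return 0;
}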
+ *
+ * NOTE: This is currently hardcoded to be suitable for 100MHz crystal
+ * frequency (which is used by the DA850 EVM board) and may need to be
+ * changed if you would like to use this driver on some other board.
+ */
+#define DA850_SATA_CLK_MULTIPLIER	7
+
+static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+			    void __iomem *ahci_base)
+{
+	unsigned int val;
+
+	/* Enable SATA clock receiver */
+	val = readl(pwrdn_reg);
+	val &= ~BIT(0);
+	writel(val, pwrdn_reg);
+
+	val = SATA_PHY_MPY(DA850_SATA_CLK_MULTIPLIER + 1) | SATA_PHY_LOS(1) |
+	      SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) |
+	      SATA_PHY_ENPLL(1);
+
+	writel(val, ahci_base + SATA_P0PHYCR_REG);
+}
+
+static const struct ata_port_info ahci_da850_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_platform_ops,
+};
+
+static int ahci_da850_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct resource *res;
+	void __iomem *pwrdn_reg;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		rc = -ENODEV;
+		goto disable_resources;
+	}
+
+	pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pwrdn_reg) {
+		rc = -ENOMEM;
+		goto disable_resources;
+	}
+
+	da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
+				     0, 0, 0);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static struct platform_driver ahci_da850_driver = {
+	.probe = ahci_da850_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = "ahci_da850",
+		.owner = THIS_MODULE,
+		.pm = &ahci_da850_pm_ops,
+	},
+};
+module_platform_driver(ahci_da850_driver);
+
+MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 00000000000..cac4360f272
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2013 Freescale Semiconductor, Inc.
+ * Freescale IMX AHCI SATA platform driver
+ *
+ * Based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/ahci_platform.h> +#include <linux/of_device.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> +#include <linux/libata.h> +#include "ahci.h" + +enum { + /* Timer 1-ms Register */ + IMX_TIMER1MS = 0x00e0, + /* Port0 PHY Control Register */ + IMX_P0PHYCR = 0x0178, + IMX_P0PHYCR_TEST_PDDQ = 1 << 20, + IMX_P0PHYCR_CR_READ = 1 << 19, + IMX_P0PHYCR_CR_WRITE = 1 << 18, + IMX_P0PHYCR_CR_CAP_DATA = 1 << 17, + IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16, + /* Port0 PHY Status Register */ + IMX_P0PHYSR = 0x017c, + IMX_P0PHYSR_CR_ACK = 1 << 18, + IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0, + /* Lane0 Output Status Register */ + IMX_LANE0_OUT_STAT = 0x2003, + IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1, + /* Clock Reset Register */ + IMX_CLOCK_RESET = 0x7f3f, + IMX_CLOCK_RESET_RESET = 1 << 0, +}; + +enum ahci_imx_type { + AHCI_IMX53, + AHCI_IMX6Q, +}; + +struct imx_ahci_priv { + struct platform_device *ahci_pdev; + enum ahci_imx_type type; + struct clk *sata_clk; + struct clk *sata_ref_clk; + struct clk *ahb_clk; + struct regmap *gpr; + bool no_device; + bool first_time; +}; + +static int ahci_imx_hotplug; +module_param_named(hotplug, ahci_imx_hotplug, int, 0644); +MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)"); + +static void ahci_imx_host_stop(struct ata_host *host); + +static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert) +{ + int timeout = 10; + u32 crval; + u32 srval; + + /* Assert or deassert the bit */ + crval = readl(mmio + IMX_P0PHYCR); + if (assert) + crval |= bit; + else + crval &= ~bit; + writel(crval, mmio + IMX_P0PHYCR); + + /* Wait for the cr_ack signal */ + do { + srval = readl(mmio + IMX_P0PHYSR); + if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK) + break; + usleep_range(100, 200); + } while (--timeout); + + return timeout ? 0 : -ETIMEDOUT; +} + +static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio) +{ + u32 crval = addr; + int ret; + + /* Supply the address on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_addr signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true); + if (ret) + return ret; + + /* Deassert cr_cap_addr */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false); + if (ret) + return ret; + + return 0; +} + +static int imx_phy_reg_write(u16 val, void __iomem *mmio) +{ + u32 crval = val; + int ret; + + /* Supply the data on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_data signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true); + if (ret) + return ret; + + /* Deassert cr_cap_data */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false); + if (ret) + return ret; + + if (val & IMX_CLOCK_RESET_RESET) { + /* + * In case we're resetting the phy, it's unable to acknowledge, + * so we return immediately here. 
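The three primitives in this file (capture address, capture data, pulse cr_write) are always used in the same order, so they compose naturally into a single write helper. The wrapper below is hypothetical; the driver calls the primitives directly:

/* Hypothetical convenience wrapper around the primitives above. */
static int imx_phy_write(u16 addr, u16 val, void __iomem *mmio)
{
	int ret;

	ret = imx_phy_reg_addressing(addr, mmio);	/* latch the address */
	if (ret)
		return ret;

	return imx_phy_reg_write(val, mmio);	/* latch data, pulse write */
}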
+	 */
+		crval |= IMX_P0PHYCR_CR_WRITE;
+		writel(crval, mmio + IMX_P0PHYCR);
+		goto out;
+	}
+
+	/* Assert the cr_write signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_write */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+	if (ret)
+		return ret;
+
+out:
+	return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+	int ret;
+
+	/* Assert the cr_read signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+	if (ret)
+		return ret;
+
+	/* Capture the data from cr_data_out[] */
+	*val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+	/* Deassert cr_read */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+	void __iomem *mmio = hpriv->mmio;
+	int timeout = 10;
+	u16 val;
+	int ret;
+
+	/* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+	ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+	if (ret)
+		return ret;
+	ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+	if (ret)
+		return ret;
+
+	/* Wait for PHY RX_PLL to be stable */
+	do {
+		usleep_range(100, 200);
+		ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+		if (ret)
+			return ret;
+		ret = imx_phy_reg_read(&val, mmio);
+		if (ret)
+			return ret;
+		if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+			break;
+	} while (--timeout);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_sata_enable(struct ahci_host_priv *hpriv)
+{
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	struct device *dev = &imxpriv->ahci_pdev->dev;
+	int ret;
+
+	if (imxpriv->no_device)
+		return 0;
+
+	if (hpriv->target_pwr) {
+		ret = regulator_enable(hpriv->target_pwr);
+		if (ret)
+			return ret;
+	}
+
+	ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+	if (ret < 0)
+		goto disable_regulator;
+
+	if (imxpriv->type == AHCI_IMX6Q) {
+		/*
+		 * Set the PHY parameters. Configuring GPR13 takes two
+		 * steps: one write, with a mask of 0x07ffffff, for all
+		 * parameters except mpll_clk_en, and a second write to
+		 * set the mpll_clk_en bit.
+ */ + regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, + IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK | + IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK | + IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK | + IMX6Q_GPR13_SATA_SPD_MODE_MASK | + IMX6Q_GPR13_SATA_MPLL_SS_EN | + IMX6Q_GPR13_SATA_TX_ATTEN_MASK | + IMX6Q_GPR13_SATA_TX_BOOST_MASK | + IMX6Q_GPR13_SATA_TX_LVL_MASK | + IMX6Q_GPR13_SATA_MPLL_CLK_EN | + IMX6Q_GPR13_SATA_TX_EDGE_RATE, + IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | + IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M | + IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F | + IMX6Q_GPR13_SATA_SPD_MODE_3P0G | + IMX6Q_GPR13_SATA_MPLL_SS_EN | + IMX6Q_GPR13_SATA_TX_ATTEN_9_16 | + IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB | + IMX6Q_GPR13_SATA_TX_LVL_1_025_V); + regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, + IMX6Q_GPR13_SATA_MPLL_CLK_EN, + IMX6Q_GPR13_SATA_MPLL_CLK_EN); + + usleep_range(100, 200); + + ret = imx_sata_phy_reset(hpriv); + if (ret) { + dev_err(dev, "failed to reset phy: %d\n", ret); + goto disable_regulator; + } + } + + usleep_range(1000, 2000); + + return 0; + +disable_regulator: + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); + + return ret; +} + +static void imx_sata_disable(struct ahci_host_priv *hpriv) +{ + struct imx_ahci_priv *imxpriv = hpriv->plat_data; + + if (imxpriv->no_device) + return; + + if (imxpriv->type == AHCI_IMX6Q) { + regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, + IMX6Q_GPR13_SATA_MPLL_CLK_EN, + !IMX6Q_GPR13_SATA_MPLL_CLK_EN); + } + + clk_disable_unprepare(imxpriv->sata_ref_clk); + + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); +} + +static void ahci_imx_error_handler(struct ata_port *ap) +{ + u32 reg_val; + struct ata_device *dev; + struct ata_host *host = dev_get_drvdata(ap->dev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + struct imx_ahci_priv *imxpriv = hpriv->plat_data; + + ahci_error_handler(ap); + + if (!(imxpriv->first_time) || ahci_imx_hotplug) + return; + + imxpriv->first_time = false; + + ata_for_each_dev(dev, &ap->link, ENABLED) + return; + /* + * Disable link to save power. An imx ahci port can't be recovered + * without full reset once the pddq mode is enabled making it + * impossible to use as part of libata LPM. 
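A note on the regmap_update_bits() calls above: the helper does a read-modify-write of the form new = (old & ~mask) | (val & mask), so passing the mask itself as val sets the bits and passing 0 (spelled !IMX6Q_GPR13_SATA_MPLL_CLK_EN in imx_sata_disable()) clears them. A plain-C sketch of the semantics:

#include <stdio.h>

/* What regmap_update_bits(map, reg, mask, val) does to the register
 * value, minus the locking and I/O.
 */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int clk_en = 1 << 2;	/* stand-in for the GPR13 bit */

	printf("set:   0x%x\n", update_bits(0x0, clk_en, clk_en));  /* 0x4 */
	printf("clear: 0x%x\n", update_bits(0x4, clk_en, !clk_en)); /* 0x0 */
	return 0;
}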
+	 */
+	reg_val = readl(mmio + IMX_P0PHYCR);
+	writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
+	imx_sata_disable(hpriv);
+	imxpriv->no_device = true;
+
+	dev_info(ap->dev, "no device found, disabling link.\n");
+	dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
+}
+
+static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
+		       unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_host *host = dev_get_drvdata(ap->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	int ret = -EIO;
+
+	if (imxpriv->type == AHCI_IMX53)
+		ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+	else if (imxpriv->type == AHCI_IMX6Q)
+		ret = ahci_ops.softreset(link, class, deadline);
+
+	return ret;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= ahci_imx_host_stop,
+	.error_handler	= ahci_imx_error_handler,
+	.softreset	= ahci_imx_softreset,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_imx_ops,
+};
+
+static const struct of_device_id imx_ahci_of_match[] = {
+	{ .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
+	{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
+	{},
+};
+MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+static int imx_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id;
+	struct ahci_host_priv *hpriv;
+	struct imx_ahci_priv *imxpriv;
+	unsigned int reg_val;
+	int ret;
+
+	of_id = of_match_device(imx_ahci_of_match, dev);
+	if (!of_id)
+		return -EINVAL;
+
+	imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+	if (!imxpriv)
+		return -ENOMEM;
+
+	imxpriv->ahci_pdev = pdev;
+	imxpriv->no_device = false;
+	imxpriv->first_time = true;
+	imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+	imxpriv->sata_clk = devm_clk_get(dev, "sata");
+	if (IS_ERR(imxpriv->sata_clk)) {
+		dev_err(dev, "can't get sata clock.\n");
+		return PTR_ERR(imxpriv->sata_clk);
+	}
+
+	imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+	if (IS_ERR(imxpriv->sata_ref_clk)) {
+		dev_err(dev, "can't get sata_ref clock.\n");
+		return PTR_ERR(imxpriv->sata_ref_clk);
+	}
+
+	imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(imxpriv->ahb_clk)) {
+		dev_err(dev, "can't get ahb clock.\n");
+		return PTR_ERR(imxpriv->ahb_clk);
+	}
+
+	if (imxpriv->type == AHCI_IMX6Q) {
+		imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+					"fsl,imx6q-iomuxc-gpr");
+		if (IS_ERR(imxpriv->gpr)) {
+			dev_err(dev,
+				"failed to find fsl,imx6q-iomux-gpr regmap\n");
+			return PTR_ERR(imxpriv->gpr);
+		}
+	}
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	hpriv->plat_data = imxpriv;
+
+	ret = clk_prepare_enable(imxpriv->sata_clk);
+	if (ret)
+		return ret;
+
+	ret = imx_sata_enable(hpriv);
+	if (ret)
+		goto disable_clk;
+
+	/*
+	 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+	 * and IP vendor specific register IMX_TIMER1MS.
+	 * Configure CAP_SSS (support staggered spin up).
+	 * Implement port0.
+	 * Get the ahb clock rate, and configure the TIMER1MS register.
+ */ + reg_val = readl(hpriv->mmio + HOST_CAP); + if (!(reg_val & HOST_CAP_SSS)) { + reg_val |= HOST_CAP_SSS; + writel(reg_val, hpriv->mmio + HOST_CAP); + } + reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL); + if (!(reg_val & 0x1)) { + reg_val |= 0x1; + writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL); + } + + reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; + writel(reg_val, hpriv->mmio + IMX_TIMER1MS); + + ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, + 0, 0, 0); + if (ret) + goto disable_sata; + + return 0; + +disable_sata: + imx_sata_disable(hpriv); +disable_clk: + clk_disable_unprepare(imxpriv->sata_clk); + return ret; +} + +static void ahci_imx_host_stop(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + struct imx_ahci_priv *imxpriv = hpriv->plat_data; + + imx_sata_disable(hpriv); + clk_disable_unprepare(imxpriv->sata_clk); +} + +#ifdef CONFIG_PM_SLEEP +static int imx_ahci_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int ret; + + ret = ahci_platform_suspend_host(dev); + if (ret) + return ret; + + imx_sata_disable(hpriv); + + return 0; +} + +static int imx_ahci_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int ret; + + ret = imx_sata_enable(hpriv); + if (ret) + return ret; + + return ahci_platform_resume_host(dev); +} +#endif + +static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume); + +static struct platform_driver imx_ahci_driver = { + .probe = imx_ahci_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = "ahci-imx", + .owner = THIS_MODULE, + .of_match_table = imx_ahci_of_match, + .pm = &ahci_imx_pm_ops, + }, +}; +module_platform_driver(imx_ahci_driver); + +MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); +MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ahci:imx"); diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c new file mode 100644 index 00000000000..fd3dfd733b8 --- /dev/null +++ b/drivers/ata/ahci_mvebu.c @@ -0,0 +1,128 @@ +/* + * AHCI glue platform driver for Marvell EBU SOCs + * + * Copyright (C) 2014 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "ahci.h"
+
+#define AHCI_VENDOR_SPECIFIC_0_ADDR  0xa0
+#define AHCI_VENDOR_SPECIFIC_0_DATA  0xa4
+
+#define AHCI_WINDOW_CTRL(win)	(0x60 + ((win) << 4))
+#define AHCI_WINDOW_BASE(win)	(0x64 + ((win) << 4))
+#define AHCI_WINDOW_SIZE(win)	(0x68 + ((win) << 4))
+
+static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
+				   const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		writel(0, hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(((cs->size - 1) & 0xffff0000),
+		       hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+}
+
+static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
+{
+	/*
+	 * Enable the regret bit to allow the SATA unit to regret a
+	 * request that didn't receive an acknowledge and avoid a
+	 * deadlock.
+	 */
+	writel(0x4, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+}
+
+static const struct ata_port_info ahci_mvebu_port_info = {
+	.flags	   = AHCI_FLAG_COMMON,
+	.pio_mask  = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops  = &ahci_platform_ops,
+};
+
+static int ahci_mvebu_probe(struct platform_device *pdev)
+{
+	struct ahci_host_priv *hpriv;
+	const struct mbus_dram_target_info *dram;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	dram = mv_mbus_dram_info();
+	if (!dram) {
+		rc = -ENODEV;
+		goto disable_resources;
+	}
+
+	ahci_mvebu_mbus_config(hpriv, dram);
+	ahci_mvebu_regret_option(hpriv);
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
+				     0, 0, 0);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static const struct of_device_id ahci_mvebu_of_match[] = {
+	{ .compatible = "marvell,armada-380-ahci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
+
+/*
+ * We currently don't provide power management related operations,
+ * since there is no suspend/resume support at the platform level for
+ * Armada 38x for the moment.
+ */
+static struct platform_driver ahci_mvebu_driver = {
+	.probe = ahci_mvebu_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = "ahci-mvebu",
+		.owner = THIS_MODULE,
+		.of_match_table = ahci_mvebu_of_match,
+	},
+};
+module_platform_driver(ahci_mvebu_driver);
+
+MODULE_DESCRIPTION("Marvell EBU AHCI SATA driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>, Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci_mvebu");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 00000000000..b10d81ddb52
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,105 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005  Red Hat, Inc.
+ *   Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010  MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/ahci_platform.h> +#include "ahci.h" + +static const struct ata_port_info ahci_port_info = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_platform_ops, +}; + +static int ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_platform_data *pdata = dev_get_platdata(dev); + struct ahci_host_priv *hpriv; + unsigned long hflags = 0; + int rc; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + /* + * Some platforms might need to prepare for mmio region access, + * which could be done in the following init call. So, the mmio + * region shouldn't be accessed before init (if provided) has + * returned successfully. + */ + if (pdata && pdata->init) { + rc = pdata->init(dev, hpriv->mmio); + if (rc) + goto disable_resources; + } + + if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) + hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; + + rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, + hflags, 0, 0); + if (rc) + goto pdata_exit; + + return 0; +pdata_exit: + if (pdata && pdata->exit) + pdata->exit(dev); +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} + +static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend, + ahci_platform_resume); + +static const struct of_device_id ahci_of_match[] = { + { .compatible = "snps,spear-ahci", }, + { .compatible = "snps,exynos5440-ahci", }, + { .compatible = "ibm,476gtr-ahci", }, + { .compatible = "snps,dwc-ahci", }, + { .compatible = "hisilicon,hisi-ahci", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ahci_of_match); + +static struct platform_driver ahci_driver = { + .probe = ahci_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = "ahci", + .owner = THIS_MODULE, + .of_match_table = ahci_of_match, + .pm = &ahci_pm_ops, + }, +}; +module_platform_driver(ahci_driver); + +MODULE_DESCRIPTION("AHCI SATA platform driver"); +MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ahci"); diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c new file mode 100644 index 00000000000..2595598df9c --- /dev/null +++ b/drivers/ata/ahci_st.c @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2012 STMicroelectronics Limited + * + * Authors: Francesco Virlinzi <francesco.virlinzi@st.com> + * Alexandre Torgue <alexandre.torgue@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/export.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/ahci_platform.h> +#include <linux/libata.h> +#include <linux/reset.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> + +#include "ahci.h" + +#define ST_AHCI_OOBR 0xbc +#define ST_AHCI_OOBR_WE BIT(31) +#define ST_AHCI_OOBR_CWMIN_SHIFT 24 +#define ST_AHCI_OOBR_CWMAX_SHIFT 16 +#define ST_AHCI_OOBR_CIMIN_SHIFT 8 +#define ST_AHCI_OOBR_CIMAX_SHIFT 0 + +struct st_ahci_drv_data { + struct platform_device *ahci; + struct reset_control *pwr; + struct reset_control *sw_rst; + struct reset_control *pwr_rst; + struct ahci_host_priv *hpriv; +}; + +static void st_ahci_configure_oob(void __iomem *mmio) +{ + unsigned long old_val, new_val; + + new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) | + (0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) | + (0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) | + (0x0C << ST_AHCI_OOBR_CIMAX_SHIFT); + + old_val = readl(mmio + ST_AHCI_OOBR); + writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR); + writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR); + writel(new_val, mmio + ST_AHCI_OOBR); +} + +static int st_ahci_deassert_resets(struct device *dev) +{ + struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); + int err; + + if (drv_data->pwr) { + err = reset_control_deassert(drv_data->pwr); + if (err) { + dev_err(dev, "unable to bring out of pwrdwn\n"); + return err; + } + } + + st_ahci_configure_oob(drv_data->hpriv->mmio); + + if (drv_data->sw_rst) { + err = reset_control_deassert(drv_data->sw_rst); + if (err) { + dev_err(dev, "unable to bring out of sw-rst\n"); + return err; + } + } + + if (drv_data->pwr_rst) { + err = reset_control_deassert(drv_data->pwr_rst); + if (err) { + dev_err(dev, "unable to bring out of pwr-rst\n"); + return err; + } + } + + return 0; +} + +static void st_ahci_host_stop(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + struct device *dev = host->dev; + struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); + int err; + + if (drv_data->pwr) { + err = reset_control_assert(drv_data->pwr); + if (err) + dev_err(dev, "unable to pwrdwn\n"); + } + + ahci_platform_disable_resources(hpriv); +} + +static int st_ahci_probe_resets(struct platform_device *pdev) +{ + struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev); + + drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn"); + if (IS_ERR(drv_data->pwr)) { + dev_info(&pdev->dev, "power reset control not defined\n"); + drv_data->pwr = NULL; + } + + drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst"); + if (IS_ERR(drv_data->sw_rst)) { + dev_info(&pdev->dev, "soft reset control not defined\n"); + drv_data->sw_rst = NULL; + } + + drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst"); + if (IS_ERR(drv_data->pwr_rst)) { + dev_dbg(&pdev->dev, "power soft reset control not defined\n"); + drv_data->pwr_rst = NULL; + } + + return st_ahci_deassert_resets(&pdev->dev); +} + +static struct ata_port_operations st_ahci_port_ops = { + .inherits = &ahci_platform_ops, + .host_stop = st_ahci_host_stop, +}; + +static const struct ata_port_info st_ahci_port_info = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &st_ahci_port_ops, +}; + +static int st_ahci_probe(struct platform_device *pdev) +{ + struct st_ahci_drv_data *drv_data; + struct ahci_host_priv *hpriv; + int err; + + drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL); 
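+	/*
+	 * The allocation above is device-managed: it is released
+	 * automatically on probe failure or driver unbind, so none of
+	 * the error paths below free it explicitly.
+	 */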
+ if (!drv_data) + return -ENOMEM; + + platform_set_drvdata(pdev, drv_data); + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + drv_data->hpriv = hpriv; + + err = st_ahci_probe_resets(pdev); + if (err) + return err; + + err = ahci_platform_enable_resources(hpriv); + if (err) + return err; + + err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0, 0); + if (err) { + ahci_platform_disable_resources(hpriv); + return err; + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int st_ahci_suspend(struct device *dev) +{ + struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = drv_data->hpriv; + int err; + + err = ahci_platform_suspend_host(dev); + if (err) + return err; + + if (drv_data->pwr) { + err = reset_control_assert(drv_data->pwr); + if (err) { + dev_err(dev, "unable to pwrdwn"); + return err; + } + } + + ahci_platform_disable_resources(hpriv); + + return 0; +} + +static int st_ahci_resume(struct device *dev) +{ + struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = drv_data->hpriv; + int err; + + err = ahci_platform_enable_resources(hpriv); + if (err) + return err; + + err = st_ahci_deassert_resets(dev); + if (err) { + ahci_platform_disable_resources(hpriv); + return err; + } + + return ahci_platform_resume_host(dev); +} +#endif + +static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume); + +static struct of_device_id st_ahci_match[] = { + { .compatible = "st,ahci", }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_ahci_match); + +static struct platform_driver st_ahci_driver = { + .driver = { + .name = "st_ahci", + .owner = THIS_MODULE, + .pm = &st_ahci_pm_ops, + .of_match_table = of_match_ptr(st_ahci_match), + }, + .probe = st_ahci_probe, + .remove = ata_platform_remove_one, +}; +module_platform_driver(st_ahci_driver); + +MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>"); +MODULE_AUTHOR("Francesco Virlinzi <francesco.virlinzi@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c new file mode 100644 index 00000000000..02002f125bd --- /dev/null +++ b/drivers/ata/ahci_sunxi.c @@ -0,0 +1,252 @@ +/* + * Allwinner sunxi AHCI SATA platform driver + * Copyright 2013 Olliver Schinagl <oliver@schinagl.nl> + * Copyright 2014 Hans de Goede <hdegoede@redhat.com> + * + * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov + * Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>, + * Daniel Wang <danielwang@allwinnertech.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define AHCI_BISTAFR	0x00a0
+#define AHCI_BISTCR	0x00a4
+#define AHCI_BISTFCTR	0x00a8
+#define AHCI_BISTSR	0x00ac
+#define AHCI_BISTDECR	0x00b0
+#define AHCI_DIAGNR0	0x00b4
+#define AHCI_DIAGNR1	0x00b8
+#define AHCI_OOBR	0x00bc
+#define AHCI_PHYCS0R	0x00c0
+#define AHCI_PHYCS1R	0x00c4
+#define AHCI_PHYCS2R	0x00c8
+#define AHCI_TIMER1MS	0x00e0
+#define AHCI_GPARAM1R	0x00e8
+#define AHCI_GPARAM2R	0x00ec
+#define AHCI_PPARAMR	0x00f0
+#define AHCI_TESTR	0x00f4
+#define AHCI_VERSIONR	0x00f8
+#define AHCI_IDR	0x00fc
+#define AHCI_RWCR	0x00fc
+#define AHCI_P0DMACR	0x0170
+#define AHCI_P0PHYCR	0x0178
+#define AHCI_P0PHYSR	0x017c
+
+static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val &= ~(clr_val);
+	writel(reg_val, reg);
+}
+
+static void sunxi_setbits(void __iomem *reg, u32 set_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val |= set_val;
+	writel(reg_val, reg);
+}
+
+static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val &= ~(clr_val);
+	reg_val |= set_val;
+	writel(reg_val, reg);
+}
+
+static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
+{
+	return (readl(reg) >> shift) & mask;
+}
+
+static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
+{
+	u32 reg_val;
+	int timeout;
+
+	/* This magic is from the original code */
+	writel(0, reg_base + AHCI_RWCR);
+	msleep(5);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+			 (0x7 << 24),
+			 (0x5 << 24) | BIT(23) | BIT(18));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
+			 (0x3 << 16) | (0x1f << 8) | (0x3 << 6),
+			 (0x2 << 16) | (0x6 << 8) | (0x2 << 6));
+	sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
+	sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+			 (0x7 << 20), (0x3 << 20));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
+			 (0x1f << 5), (0x19 << 5));
+	msleep(5);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
+
+	timeout = 250; /* Power up takes approx 50 us */
+	do {
+		reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
+		if (reg_val == 0x02)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(dev, "PHY power up failed.\n");
+			return -EIO;
+		}
+		udelay(1);
+	} while (1);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
+
+	timeout = 100; /* Calibration takes approx 10 us */
+	do {
+		reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
+		if (reg_val == 0x00)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(dev, "PHY calibration failed.\n");
+			return -EIO;
+		}
+		udelay(1);
+	} while (1);
+
+	msleep(15);
+
+	writel(0x7, reg_base + AHCI_RWCR);
+
+	return 0;
+}
+
+static void ahci_sunxi_start_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	/* Setup DMA before DMA start */
+	sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+
+	/* Start DMA */
+	sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
+}
+
+static const struct ata_port_info ahci_sunxi_port_info = {
+	.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+	.pio_mask = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops = &ahci_platform_ops,
+};
+
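+/*
+ * Note the ordering in the probe routine below: clocks and regulators
+ * are brought up by ahci_platform_enable_resources() before
+ * ahci_sunxi_phy_init() touches the vendor PHY registers, and the
+ * hflags encode the controller quirks: 32-bit DMA only, no MSI, no
+ * port multiplier, and NCQ enabled via AHCI_HFLAG_YES_NCQ even if the
+ * capability register does not advertise it.
+ */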
+static int ahci_sunxi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + unsigned long hflags; + int rc; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + hpriv->start_engine = ahci_sunxi_start_engine; + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + rc = ahci_sunxi_phy_init(dev, hpriv->mmio); + if (rc) + goto disable_resources; + + hflags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI | + AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; + + rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, + hflags, 0, 0); + if (rc) + goto disable_resources; + + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static int ahci_sunxi_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + rc = ahci_sunxi_phy_init(dev, hpriv->mmio); + if (rc) + goto disable_resources; + + rc = ahci_platform_resume_host(dev); + if (rc) + goto disable_resources; + + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} +#endif + +static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend, + ahci_sunxi_resume); + +static const struct of_device_id ahci_sunxi_of_match[] = { + { .compatible = "allwinner,sun4i-a10-ahci", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match); + +static struct platform_driver ahci_sunxi_driver = { + .probe = ahci_sunxi_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = "ahci-sunxi", + .owner = THIS_MODULE, + .of_match_table = ahci_sunxi_of_match, + .pm = &ahci_sunxi_pm_ops, + }, +}; +module_platform_driver(ahci_sunxi_driver); + +MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver"); +MODULE_AUTHOR("Olliver Schinagl <oliver@schinagl.nl>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c new file mode 100644 index 00000000000..ee3a3659bd9 --- /dev/null +++ b/drivers/ata/ahci_xgene.c @@ -0,0 +1,523 @@ +/* + * AppliedMicro X-Gene SoC SATA Host Controller Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Loc Ho <lho@apm.com> + * Tuan Phan <tphan@apm.com> + * Suman Tripathi <stripathi@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * NOTE: PM support is not currently available. 
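+ *
+ * Besides the AHCI MMIO region, the driver consumes separate core,
+ * diagnostic, AXI and (optional) mux CSR regions; see xgene_ahci_probe().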
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include "ahci.h"
+
+/* Max # of disks per controller */
+#define MAX_AHCI_CHN_PERCTR		2
+
+/* MUX CSR */
+#define SATA_ENET_CONFIG_REG		0x00000000
+#define  CFG_SATA_ENET_SELECT_MASK	0x00000001
+
+/* SATA core host controller CSR */
+#define SLVRDERRATTRIBUTES		0x00000000
+#define SLVWRERRATTRIBUTES		0x00000004
+#define MSTRDERRATTRIBUTES		0x00000008
+#define MSTWRERRATTRIBUTES		0x0000000c
+#define BUSCTLREG			0x00000014
+#define IOFMSTRWAUX			0x00000018
+#define INTSTATUSMASK			0x0000002c
+#define ERRINTSTATUS			0x00000030
+#define ERRINTSTATUSMASK		0x00000034
+
+/* SATA host AHCI CSR */
+#define PORTCFG				0x000000a4
+#define  PORTADDR_SET(dst, src) \
+		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
+#define PORTPHY1CFG		0x000000a8
+#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
+		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
+#define PORTPHY2CFG			0x000000ac
+#define PORTPHY3CFG			0x000000b0
+#define PORTPHY4CFG			0x000000b4
+#define PORTPHY5CFG			0x000000b8
+#define SCTL0				0x0000012C
+#define PORTPHY5CFG_RTCHG_SET(dst, src) \
+		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
+#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
+		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
+#define PORTAXICFG			0x000000bc
+#define PORTAXICFG_OUTTRANS_SET(dst, src) \
+		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+
+/* SATA host controller AXI CSR */
+#define INT_SLV_TMOMASK			0x00000010
+
+/* SATA diagnostic CSR */
+#define CFG_MEM_RAM_SHUTDOWN		0x00000070
+#define BLOCK_MEM_RDY			0x00000074
+
+struct xgene_ahci_context {
+	struct ahci_host_priv *hpriv;
+	struct device *dev;
+	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
+	void __iomem *csr_core;		/* Core CSR address of IP */
+	void __iomem *csr_diag;		/* Diag CSR address of IP */
+	void __iomem *csr_axi;		/* AXI CSR address of IP */
+	void __iomem *csr_mux;		/* MUX CSR address of IP */
+};
+
+static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+{
+	dev_dbg(ctx->dev, "Release memory from shutdown\n");
+	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
+	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
+	msleep(1);	/* reset may take up to 1ms */
+	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
+		dev_err(ctx->dev, "failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * xgene_ahci_restart_engine - Restart the DMA engine.
+ * @ap : ATA port of interest
+ *
+ * Restarts the DMA engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	ahci_stop_engine(ap);
+	ahci_start_fis_rx(ap);
+	hpriv->start_engine(ap);
+
+	return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
+ * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
+ * DMA state machine to go into the CMFatalErrorUpdate state and lock up.
+ * Restarting the DMA engine brings the controller out of the locked-up state.
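+ *
+ * To know when this workaround must kick in, the last command issued on
+ * each port is recorded in xgene_ahci_context.last_cmd[].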
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	int rc = 0;
+
+	if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+		xgene_ahci_restart_engine(ap);
+
+	rc = ahci_qc_issue(qc);
+
+	/* Save the last command issued */
+	ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+	return rc;
+}
+
+/**
+ * xgene_ahci_read_id - Read ID data from the specified device
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * This custom read ID function is required because the HW does not
+ * support DEVSLP.
+ */
+static unsigned int xgene_ahci_read_id(struct ata_device *dev,
+				       struct ata_taskfile *tf, u16 *id)
+{
+	u32 err_mask;
+
+	err_mask = ata_do_dev_read_id(dev, tf, id);
+	if (err_mask)
+		return err_mask;
+
+	/*
+	 * Mask reserved area. Word78 spec of Link Power Management
+	 * bit15-8: reserved
+	 * bit7: NCQ autosense
+	 * bit6: Software settings preservation supported
+	 * bit5: reserved
+	 * bit4: In-order SATA delivery supported
+	 * bit3: DIPM requests supported
+	 * bit2: DMA Setup FIS Auto-Activate optimization supported
+	 * bit1: DMA Setup FIS non-zero buffer offsets supported
+	 * bit0: Reserved
+	 *
+	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
+	 */
+	id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+
+	return 0;
+}
+
+static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
+{
+	void __iomem *mmio = ctx->hpriv->mmio;
+	u32 val;
+
+	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
+		mmio, channel);
+	val = readl(mmio + PORTCFG);
+	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
+	writel(val, mmio + PORTCFG);
+	readl(mmio + PORTCFG);  /* Force a barrier */
+	/* Disable fix rate */
+	writel(0x0001fffe, mmio + PORTPHY1CFG);
+	readl(mmio + PORTPHY1CFG); /* Force a barrier */
+	writel(0x5018461c, mmio + PORTPHY2CFG);
+	readl(mmio + PORTPHY2CFG); /* Force a barrier */
+	writel(0x1c081907, mmio + PORTPHY3CFG);
+	readl(mmio + PORTPHY3CFG); /* Force a barrier */
+	writel(0x1c080815, mmio + PORTPHY4CFG);
+	readl(mmio + PORTPHY4CFG); /* Force a barrier */
+	/* Set window negotiation */
+	val = readl(mmio + PORTPHY5CFG);
+	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
+	writel(val, mmio + PORTPHY5CFG);
+	readl(mmio + PORTPHY5CFG); /* Force a barrier */
+	val = readl(mmio + PORTAXICFG);
+	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
+	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
+	writel(val, mmio + PORTAXICFG);
+	readl(mmio + PORTAXICFG); /* Force a barrier */
+}
+
+/**
+ * xgene_ahci_do_hardreset - Issue the actual COMRESET
+ * @link: link to reset
+ * @deadline: deadline jiffies for the operation
+ * @online: Return value to indicate if device online
+ *
+ * Due to the limitations of the hardware PHY, a different set of settings
+ * is required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
+ * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
+ * report disparity errors, etc. In addition, during COMRESET, errors can be
+ * reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. The following
+ * algorithm is followed to properly configure the hardware PHY during
+ * COMRESET:
+ *
+ * Alg Part 1:
+ * 1. Start the PHY at Gen3 speed (default setting)
+ * 2. Issue the COMRESET
+ * 3. If no link, go to Alg Part 3
+ * 4. If link up, determine if the negotiated speed matches the PHY
+ *    configured speed
+ * 5. If they match, go to Alg Part 2
+ * 6. If they do not match and this is the first attempt, configure the
+ *    PHY for the negotiated disk speed and repeat step 2
+ * 7. Go to Alg Part 2
+ *
+ * Alg Part 2:
+ * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR errors
+ *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
+ * 2. Go to Alg Part 3
+ *
+ * Alg Part 3:
+ * 1. Clear any pending errors in register PORT_SCR_ERR.
+ *
+ * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
+ * until the underlying PHY supports a method to reset the receiver line, a
+ * warning message will be printed on detection of SERR_DISPARITY or
+ * SERR_10B_8B_ERR errors.
+ */
+static int xgene_ahci_do_hardreset(struct ata_link *link,
+				   unsigned long deadline, bool *online)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ata_taskfile tf;
+	int rc;
+	u32 val;
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = ATA_BUSY;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+	rc = sata_link_hardreset(link, timing, deadline, online,
+				 ahci_check_ready);
+
+	val = readl(port_mmio + PORT_SCR_ERR);
+	if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+		dev_warn(ctx->dev, "link has error\n");
+
+	/* clear all errors if any pending */
+	val = readl(port_mmio + PORT_SCR_ERR);
+	writel(val, port_mmio + PORT_SCR_ERR);
+
+	return rc;
+}
+
+static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	bool online;
+	int rc;
+	u32 portcmd_saved;
+	u32 portclb_saved;
+	u32 portclbhi_saved;
+	u32 portrxfis_saved;
+	u32 portrxfishi_saved;
+
+	/* As hardreset resets these CSR, save it to restore later */
+	portcmd_saved = readl(port_mmio + PORT_CMD);
+	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
+	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
+	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+
+	ahci_stop_engine(ap);
+
+	rc = xgene_ahci_do_hardreset(link, deadline, &online);
+
+	/* As controller hardreset clears them, restore them */
+	writel(portcmd_saved, port_mmio + PORT_CMD);
+	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
+	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
+	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
+	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
+
+	hpriv->start_engine(ap);
+
+	if (online)
+		*class = ahci_dev_classify(ap);
+
+	return rc;
+}
+
+static void xgene_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	ahci_platform_disable_resources(hpriv);
+}
+
+static struct ata_port_operations xgene_ahci_ops = {
+	.inherits = &ahci_ops,
+	.host_stop = xgene_ahci_host_stop,
+	.hardreset = xgene_ahci_hardreset,
+	.read_id = xgene_ahci_read_id,
+	.qc_issue = xgene_ahci_qc_issue,
+};
+
+static const struct ata_port_info xgene_ahci_port_info = {
+	.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+	
.pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &xgene_ahci_ops, +}; + +static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv) +{ + struct xgene_ahci_context *ctx = hpriv->plat_data; + int i; + int rc; + u32 val; + + /* Remove IP RAM out of shutdown */ + rc = xgene_ahci_init_memram(ctx); + if (rc) + return rc; + + for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++) + xgene_ahci_set_phy_cfg(ctx, i); + + /* AXI disable Mask */ + writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT); + readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */ + writel(0, ctx->csr_core + INTSTATUSMASK); + val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */ + dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n", + INTSTATUSMASK, val); + + writel(0x0, ctx->csr_core + ERRINTSTATUSMASK); + readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */ + writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK); + readl(ctx->csr_axi + INT_SLV_TMOMASK); + + /* Enable AXI Interrupt */ + writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES); + + /* Enable coherency */ + val = readl(ctx->csr_core + BUSCTLREG); + val &= ~0x00000002; /* Enable write coherency */ + val &= ~0x00000001; /* Enable read coherency */ + writel(val, ctx->csr_core + BUSCTLREG); + + val = readl(ctx->csr_core + IOFMSTRWAUX); + val |= (1 << 3); /* Enable read coherency */ + val |= (1 << 9); /* Enable write coherency */ + writel(val, ctx->csr_core + IOFMSTRWAUX); + val = readl(ctx->csr_core + IOFMSTRWAUX); + dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n", + IOFMSTRWAUX, val); + + return rc; +} + +static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx) +{ + u32 val; + + /* Check for optional MUX resource */ + if (IS_ERR(ctx->csr_mux)) + return 0; + + val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG); + val &= ~CFG_SATA_ENET_SELECT_MASK; + writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG); + val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG); + return val & CFG_SATA_ENET_SELECT_MASK ? 
-1 : 0; +} + +static int xgene_ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct xgene_ahci_context *ctx; + struct resource *res; + unsigned long hflags; + int rc; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + hpriv->plat_data = ctx; + ctx->hpriv = hpriv; + ctx->dev = dev; + + /* Retrieve the IP core resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ctx->csr_core = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_core)) + return PTR_ERR(ctx->csr_core); + + /* Retrieve the IP diagnostic resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + ctx->csr_diag = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_diag)) + return PTR_ERR(ctx->csr_diag); + + /* Retrieve the IP AXI resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + ctx->csr_axi = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_axi)) + return PTR_ERR(ctx->csr_axi); + + /* Retrieve the optional IP mux resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 4); + ctx->csr_mux = devm_ioremap_resource(dev, res); + + dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core, + hpriv->mmio); + + /* Select ATA */ + if ((rc = xgene_ahci_mux_select(ctx))) { + dev_err(dev, "SATA mux selection failed error %d\n", rc); + return -ENODEV; + } + + /* Due to errata, HW requires full toggle transition */ + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_resources; + ahci_platform_disable_clks(hpriv); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + goto disable_resources; + + /* Configure the host controller */ + xgene_ahci_hw_init(hpriv); + + /* + * Setup DMA mask. This is preliminary until the DMA range is sorted + * out. + */ + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(dev, "Unable to set dma mask\n"); + goto disable_resources; + } + + hflags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; + + rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, + hflags, 0, 0); + if (rc) + goto disable_resources; + + dev_dbg(dev, "X-Gene SATA host controller initialized\n"); + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} + +static const struct of_device_id xgene_ahci_of_match[] = { + {.compatible = "apm,xgene-ahci"}, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_ahci_of_match); + +static struct platform_driver xgene_ahci_driver = { + .probe = xgene_ahci_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = "xgene-ahci", + .owner = THIS_MODULE, + .of_match_table = xgene_ahci_of_match, + }, +}; + +module_platform_driver(xgene_ahci_driver); + +MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver"); +MODULE_AUTHOR("Loc Ho <lho@apm.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.4"); diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 377425e7139..9ff545ce8da 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -1,6 +1,6 @@ /* * ata_generic.c - Generic PATA/SATA controller driver. - * Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved. + * Copyright 2005 Red Hat Inc, all rights reserved. 
* * Elements from ide/pci/generic.c * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> @@ -19,51 +19,28 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "ata_generic" -#define DRV_VERSION "0.2.6" +#define DRV_VERSION "0.2.15" /* * A generic parallel ATA driver using libata */ -/** - * generic_pre_reset - probe begin - * @ap: ATA port - * - * Set up cable type and use generic probe init - */ - -static int generic_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); -} - - -/** - * generic_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe - * @classes: - * - * LOCKING: - * None (inherited from caller). - */ - - -static void generic_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, generic_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} +enum { + ATA_GEN_CLASS_MATCH = (1 << 0), + ATA_GEN_FORCE_DMA = (1 << 1), + ATA_GEN_INTEL_IDER = (1 << 2), +}; /** * generic_set_mode - mode setting - * @ap: interface to set up + * @link: link to set up + * @unused: returned device on error * * Use a non standard set_mode function. We don't want to be tuned. * The BIOS configured everything. Our job is not to fiddle. We @@ -71,90 +48,109 @@ static void generic_error_handler(struct ata_port *ap) * and respect them. */ -static void generic_set_mode(struct ata_port *ap) +static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { + struct ata_port *ap = link->ap; + const struct pci_device_id *id = ap->host->private_data; int dma_enabled = 0; - int i; - - /* Bits 5 and 6 indicate if DMA is active on master/slave */ - if (ap->ioaddr.bmdma_addr) - dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev)) { - /* We don't really care */ - dev->pio_mode = XFER_PIO_0; - dev->dma_mode = XFER_MW_DMA_0; - /* We do need the right mode information for DMA or PIO - and this comes from the current configuration flags */ - if (dma_enabled & (1 << (5 + i))) { - dev->xfer_mode = XFER_MW_DMA_0; - dev->xfer_shift = ATA_SHIFT_MWDMA; - dev->flags &= ~ATA_DFLAG_PIO; - } else { - dev->xfer_mode = XFER_PIO_0; - dev->xfer_shift = ATA_SHIFT_PIO; - dev->flags |= ATA_DFLAG_PIO; + struct ata_device *dev; + + if (id->driver_data & ATA_GEN_FORCE_DMA) { + dma_enabled = 0xff; + } else if (ap->ioaddr.bmdma_addr) { + /* Bits 5 and 6 indicate if DMA is active on master/slave */ + dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + ata_for_each_dev(dev, link, ENABLED) { + /* We don't really care */ + dev->pio_mode = XFER_PIO_0; + dev->dma_mode = XFER_MW_DMA_0; + /* We do need the right mode information for DMA or PIO + and this comes from the current configuration flags */ + if (dma_enabled & (1 << (5 + dev->devno))) { + unsigned int xfer_mask = ata_id_xfermask(dev->id); + const char *name; + + if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) + name = ata_mode_string(xfer_mask); + else { + /* SWDMA perhaps? 
*/ + name = "DMA"; + xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0); } + + ata_dev_info(dev, "configured for %s\n", name); + + dev->xfer_mode = ata_xfer_mask2mode(xfer_mask); + dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode); + dev->flags &= ~ATA_DFLAG_PIO; + } else { + ata_dev_info(dev, "configured for PIO\n"); + dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; } } + return 0; } static struct scsi_host_template generic_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations generic_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_unknown, .set_mode = generic_set_mode, - - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = generic_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static int all_generic_ide; /* Set to claim all devices */ /** + * is_intel_ider - identify intel IDE-R devices + * @dev: PCI device + * + * Distinguish Intel IDE-R controller devices from other Intel IDE + * devices. IDE-R devices have no timing registers and are in + * most respects virtual. They should be driven by the ata_generic + * driver. + * + * IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has + * it non zero. All Intel ATA has 0x40 writable (timing), but it is + * not writable on IDE-R devices (this is guaranteed). 
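+ *
+ * Returns 1 if the device is judged to be IDE-R, 0 if it should be left
+ * to ata_piix/ata_oldpiix.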
+ */ + +static int is_intel_ider(struct pci_dev *dev) +{ + /* For Intel IDE the value at 0xF8 is only zero on IDE-R + interfaces */ + u32 r; + u16 t; + + /* Check the manufacturing ID, it will be zero for IDE-R */ + pci_read_config_dword(dev, 0xF8, &r); + /* Not IDE-R: punt so that ata_(old)piix gets it */ + if (r != 0) + return 0; + /* 0xF8 will also be zero on some early Intel IDE devices + but they will have a sane timing register */ + pci_read_config_word(dev, 0x40, &t); + if (t != 0) + return 0; + /* Finally check if the timing register is writable so that + we eliminate any early devices hot-docked in a docking + station */ + pci_write_config_word(dev, 0x40, 1); + pci_read_config_word(dev, 0x40, &t); + if (t) { + pci_write_config_word(dev, 0x40, 0); + return 0; + } + return 1; +} + +/** * ata_generic_init - attach generic IDE * @dev: PCI device found * @id: match entry @@ -167,20 +163,23 @@ static int all_generic_ide; /* Set to claim all devices */ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) { u16 command; - static struct ata_port_info info = { - .sht = &generic_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &generic_port_ops }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *ppi[] = { &info, NULL }; /* Don't use the generic entry unless instructed to do so */ - if (id->driver_data == 1 && all_generic_ide == 0) + if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) return -ENODEV; + if ((id->driver_data & ATA_GEN_INTEL_IDER) && !all_generic_ide) + if (!is_intel_ider(dev)) + return -ENODEV; + /* Devices that need care */ if (dev->vendor == PCI_VENDOR_ID_UMC && dev->device == PCI_DEVICE_ID_UMC_UM8886A && @@ -199,9 +198,15 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id return -ENODEV; if (dev->vendor == PCI_VENDOR_ID_AL) - ata_pci_clear_simplex(dev); + ata_pci_bmdma_clear_simplex(dev); - return ata_pci_init_one(dev, port_info, 2); + if (dev->vendor == PCI_VENDOR_ID_ATI) { + int rc = pcim_enable_device(dev); + if (rc < 0) + return rc; + pcim_pin_device(dev); + } + return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0); } static struct pci_device_id ata_generic[] = { @@ -213,11 +218,21 @@ static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, - { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), }, + { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), + .driver_data = ATA_GEN_FORCE_DMA }, +#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, +#endif + /* Intel, IDE class device */ + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, + .driver_data = ATA_GEN_INTEL_IDER }, /* Must come last. 
If you add entries adjust this table appropriately */
-	{ PCI_ANY_ID,		PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
+	  .driver_data = ATA_GEN_CLASS_MATCH },
 	{ 0, },
 };
 
@@ -225,20 +240,14 @@ static struct pci_driver ata_generic_pci_driver = {
 	.name		= DRV_NAME,
 	.id_table	= ata_generic,
 	.probe 		= ata_generic_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
 };
 
-static int __init ata_generic_init(void)
-{
-	return pci_module_init(&ata_generic_pci_driver);
-}
-
-
-static void __exit ata_generic_exit(void)
-{
-	pci_unregister_driver(&ata_generic_pci_driver);
-}
-
+module_pci_driver(ata_generic_pci_driver);
 
 MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for generic ATA");
@@ -246,7 +255,4 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, ata_generic);
 MODULE_VERSION(DRV_VERSION);
 
-module_init(ata_generic_init);
-module_exit(ata_generic_exit);
-
 module_param(all_generic_ide, int, 0);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 5719704eb0e..893e30e9a9e 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1,7 +1,7 @@
 /*
  *    ata_piix.c - Intel PATA/SATA controllers
  *
- *	Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *	Maintained by:  Tejun Heo <tj@kernel.org>
  *			Please ALWAYS copy linux-ide@vger.kernel.org
  *			on emails.
  *
@@ -14,7 +14,7 @@
  *
  *	Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
  *	Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- *	Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ *	Copyright (C) 2003 Red Hat Inc
  *
  *
  *	This program is free software; you can redistribute it and/or modify
@@ -38,16 +38,16 @@
  *	Hardware documentation available at http://developer.intel.com/
  *
  * Documentation
- *	Publically available from Intel web site. Errata documentation
- * is also publically available. As an aide to anyone hacking on this
- * driver the list of errata that are relevant is below.going back to
+ *	Publicly available from Intel web site. Errata documentation
+ * is also publicly available. As an aid to anyone hacking on this
+ * driver the list of errata that are relevant is below, going back to
 * PIIX4. Older device documentation is now a bit tricky to find.
 *
- * The chipsets all follow very much the same design. The orginal Triton
- * series chipsets do _not_ support independant device timings, but this
+ * The chipsets all follow very much the same design. The original Triton
+ * series chipsets do _not_ support independent device timings, but this
 * is fixed in Triton II. With the odd mobile exception the chips then
 * change little except in gaining more modes until SATA arrives. This
- * driver supports only the chips with independant timing (that is those
+ * driver supports only the chips with independent timing (that is those
 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
 * for the early chip drivers.
* @@ -72,6 +72,7 @@ * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 + * ICH7 errata #16 - MWDMA1 timings are incorrect * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX @@ -89,89 +90,92 @@ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/gfp.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include <linux/dmi.h> #define DRV_NAME "ata_piix" -#define DRV_VERSION "2.00ac6" +#define DRV_VERSION "2.13" enum { PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ - ICH5_PMR = 0x90, /* port mapping register */ + ICH5_PMR = 0x90, /* address map register */ ICH5_PCS = 0x92, /* port control and status */ - PIIX_SCC = 0x0A, /* sub-class code register */ + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, - PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */ - PIIX_FLAG_SCR = (1 << 26), /* SCR available */ - PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ + PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */ - /* combined mode. if set, PATA is channel 0. - * if clear, PATA is channel 1. - */ - PIIX_PORT_ENABLED = (1 << 0), - PIIX_PORT_PRESENT = (1 << 4), + PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS, + PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, + + PIIX_FLAG_PIO16 = (1 << 30), /*support 16bit PIO only*/ PIIX_80C_PRI = (1 << 5) | (1 << 4), PIIX_80C_SEC = (1 << 7) | (1 << 6), - /* controller IDs */ - piix_pata_33 = 0, /* PIIX3 or 4 at 33Mhz */ - ich_pata_33 = 1, /* ICH up to UDMA 33 only */ - ich_pata_66 = 2, /* ICH up to 66 Mhz */ - ich_pata_100 = 3, /* ICH up to UDMA 100 */ - ich_pata_133 = 4, /* ICH up to UDMA 133 */ - ich5_sata = 5, - esb_sata = 6, - ich6_sata = 7, - ich6_sata_ahci = 8, - ich6m_sata_ahci = 9, - ich7m_sata_ahci = 10, - ich8_sata_ahci = 11, - /* constants for mapping table */ P0 = 0, /* port 0 */ P1 = 1, /* port 1 */ P2 = 2, /* port 2 */ P3 = 3, /* port 3 */ IDE = -1, /* IDE */ - NA = -2, /* not avaliable */ + NA = -2, /* not available */ RV = -3, /* reserved */ PIIX_AHCI_DEVICE = 6, + + /* host->flags bits */ + PIIX_HOST_BROKEN_SUSPEND = (1 << 24), +}; + +enum piix_controller_ids { + /* controller IDs */ + piix_pata_mwdma, /* PIIX3 MWDMA only */ + piix_pata_33, /* PIIX4 at 33Mhz */ + ich_pata_33, /* ICH up to UDMA 33 only */ + ich_pata_66, /* ICH up to 66 Mhz */ + ich_pata_100, /* ICH up to UDMA 100 */ + ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1*/ + ich5_sata, + ich6_sata, + ich6m_sata, + ich8_sata, + ich8_2port_sata, + ich8m_apple_sata, /* locks up on second port enable */ + tolapai_sata, + piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ + ich8_sata_snb, + ich8_2port_sata_snb, + ich8_2port_sata_byt, }; struct piix_map_db { const u32 mask; const u16 port_enable; - const int present_shift; const int map[][4]; }; struct piix_host_priv { const int *map; - const struct piix_map_db *map_db; + u32 saved_iocfg; + void __iomem *sidpr; }; -static int piix_init_one (struct pci_dev *pdev, - const struct pci_device_id *ent); -static void piix_host_stop(struct ata_host *host); -static void piix_pata_error_handler(struct ata_port *ap); -static void ich_pata_error_handler(struct ata_port *ap); -static void piix_sata_error_handler(struct ata_port *ap); -static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); -static void piix_set_dmamode 
(struct ata_port *ap, struct ata_device *adev); -static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev); - static unsigned int in_module_init = 1; static const struct pci_device_id piix_pci_tbl[] = { -#ifdef ATA_ENABLE_PATA + /* Intel PIIX3 for the 430HX etc */ + { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma }, + /* VMware ICH4 */ + { 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw }, /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */ /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */ { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, - { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, - { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel PIIX4 */ { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, /* Intel PIIX4 */ @@ -190,11 +194,13 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH3 (E7500/1) UDMA 100 */ { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, + /* Intel ICH4-L */ + { 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */ { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH5 */ - { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, + { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* C-ICH (i810E2) */ { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ESB (855GME/875P + 6300ESB) UDMA 100 */ @@ -202,173 +208,145 @@ static const struct pci_device_id piix_pci_tbl[] = { /* ICH6 (and 6) (i915) UDMA 100 */ { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ICH7/7-R (i945, i975) UDMA 100*/ - { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, - { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, -#endif + { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, + { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, + /* ICH8 Mobile PATA Controller */ + { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, - /* NOTE: The following PCI ids must be kept in sync with the - * list in drivers/pci/quirks.c. - */ + /* SATA ports */ /* 82801EB (ICH5) */ { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 82801EB (ICH5) */ { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 6300ESB (ICH5 variant with broken PCS present bits) */ - { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, + { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 6300ESB pretending RAID */ - { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, + { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 82801FB/FW (ICH6/ICH6W) */ { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, /* 82801FR/FRW (ICH6R/ICH6RW) */ - { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */ - { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, + { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, + /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented). + * Attach iff the controller is in IDE mode. 
*/ + { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata }, /* 82801GB/GR/GH (ICH7, identical to ICH6) */ - { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - /* 2801GBM/GHM (ICH7M, identical to ICH6M) */ - { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci }, - /* Enterprise Southbridge 2 (where's the datasheet?) */ - { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ - { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, - /* SATA Controller 2 IDE (ICH8, ditto) */ - { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, - /* Mobile SATA Controller IDE (ICH8M, ditto) */ - { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, + /* 82801GBM/GHM (ICH7M, identical to ICH6M) */ + { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata }, + /* Enterprise Southbridge 2 (631xESB/632xESB) */ + { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, + /* SATA Controller 1 IDE (ICH8) */ + { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller 2 IDE (ICH8) */ + { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* Mobile SATA Controller IDE (ICH8M), Apple */ + { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata }, + { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata }, + { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata }, + /* Mobile SATA Controller IDE (ICH8M) */ + { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (Tolapai) */ + { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata }, + /* SATA Controller IDE (ICH10) */ + { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (ICH10) */ + { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (ICH10) */ + { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (ICH10) */ + { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PCH) */ + { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (CPT) */ + { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (CPT) */ + { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, ich8_sata_snb }, + /* SATA Controller IDE (CPT) */ + { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (CPT) */ + { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PBG) */ + { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (PBG) */ + { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point) */ + { 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point) */ + { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point) */ + { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (Lynx Point) */ + { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (DH89xxCC) */ + { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (BayTrail) */ + { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, + { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, + /* SATA Controller IDE (Coleto Creek) */ + { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ }; -static struct pci_driver piix_pci_driver = { - .name = DRV_NAME, - .id_table = piix_pci_tbl, - .probe = piix_init_one, - .remove = ata_pci_remove_one, - .suspend = ata_pci_device_suspend, - .resume = ata_pci_device_resume, -}; - -static struct scsi_host_template piix_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = 
ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, - .resume = ata_scsi_device_resume, - .suspend = ata_scsi_device_suspend, -}; - -static const struct ata_port_operations piix_pata_ops = { - .port_disable = ata_port_disable, - .set_piomode = piix_set_piomode, - .set_dmamode = piix_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = piix_pata_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = piix_host_stop, -}; - -static const struct ata_port_operations ich_pata_ops = { - .port_disable = ata_port_disable, - .set_piomode = piix_set_piomode, - .set_dmamode = ich_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ich_pata_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, -}; - -static const struct ata_port_operations piix_sata_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = piix_sata_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = piix_host_stop, -}; - static const struct piix_map_db ich5_map_db = { .mask = 0x7, .port_enable = 0x3, - .present_shift = 4, .map = { /* PM PS SM SS MAP */ { P0, NA, P1, NA }, /* 000b */ @@ -385,7 +363,6 @@ static const struct piix_map_db ich5_map_db = { static const struct piix_map_db ich6_map_db = { .mask = 0x3, .port_enable = 0xf, - .present_shift = 4, .map = { /* PM PS SM SS MAP */ { P0, P2, P1, P3 }, /* 00b */ @@ -398,188 +375,79 @@ static const struct piix_map_db ich6_map_db = { static const struct piix_map_db ich6m_map_db = { .mask = 0x3, 
	.port_enable	= 0x5,
-	.present_shift	= 4,
+
+	/* Map 01b isn't specified in the doc but some notebooks use
+	 * it anyway.  MAP 01b has been spotted on both ICH6M and
+	 * ICH7M.
+	 */
 	.map = {
 		/* PM   PS   SM   SS       MAP */
-		{  P0,  P2,  RV,  RV }, /* 00b */
-		{  RV,  RV,  RV,  RV },
+		{  P0,  P2,  NA,  NA }, /* 00b */
+		{  IDE, IDE,  P1,  P3 }, /* 01b */
 		{  P0,  P2, IDE, IDE }, /* 10b */
 		{  RV,  RV,  RV,  RV },
 	},
 };

-static const struct piix_map_db ich7m_map_db = {
+static const struct piix_map_db ich8_map_db = {
 	.mask = 0x3,
-	.port_enable = 0x5,
-	.present_shift = 4,
+	.port_enable = 0xf,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b (IDE mode) */
+		{  RV,  RV,  RV,  RV },
+	},
+};

-	/* Map 01b isn't specified in the doc but some notebooks use
-	 * it anyway.  ATM, the only case spotted carries subsystem ID
-	 * 1025:0107.  This is the only difference from ich6m.
-	 */
+static const struct piix_map_db ich8_2port_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x3,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
-		{  P0,  P2,  RV,  RV }, /* 00b */
-		{  IDE, IDE,  P1,  P3 }, /* 01b */
+		{  P0,  NA,  P1,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV }, /* 01b */
+		{  RV,  RV,  RV,  RV }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8m_apple_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x1,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  NA,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV },
 		{  P0,  P2, IDE, IDE }, /* 10b */
 		{  RV,  RV,  RV,  RV },
 	},
 };

-static const struct piix_map_db ich8_map_db = {
+static const struct piix_map_db tolapai_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x3,
-	.present_shift = 8,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
-		{  P0,  NA,  P1,  NA }, /* 00b (hardwired) */
-		{  RV,  RV,  RV,  RV },
-		{  RV,  RV,  RV,  RV }, /* 10b (never) */
+		{  P0,  NA,  P1,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV }, /* 01b */
+		{  RV,  RV,  RV,  RV }, /* 10b */
 		{  RV,  RV,  RV,  RV },
 	},
 };

 static const struct piix_map_db *piix_map_db_table[] = {
 	[ich5_sata]		= &ich5_map_db,
-	[esb_sata]		= &ich5_map_db,
 	[ich6_sata]		= &ich6_map_db,
-	[ich6_sata_ahci]	= &ich6_map_db,
-	[ich6m_sata_ahci]	= &ich6m_map_db,
-	[ich7m_sata_ahci]	= &ich7m_map_db,
-	[ich8_sata_ahci]	= &ich8_map_db,
-};
-
-static struct ata_port_info piix_port_info[] = {
-	/* piix_pata_33: 0:  PIIX3 or 4 at 33MHz */
-	{
-		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x06, /* mwdma1-2 ?? 
CHECK 0 should be ok but slow */ - .udma_mask = ATA_UDMA_MASK_40C, - .port_ops = &piix_pata_ops, - }, - - /* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS, - .pio_mask = 0x1f, /* pio 0-4 */ - .mwdma_mask = 0x06, /* Check: maybe 0x07 */ - .udma_mask = ATA_UDMA2, /* UDMA33 */ - .port_ops = &ich_pata_ops, - }, - /* ich_pata_66: 2 ICH controllers up to 66MHz */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS, - .pio_mask = 0x1f, /* pio 0-4 */ - .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */ - .udma_mask = ATA_UDMA4, - .port_ops = &ich_pata_ops, - }, - - /* ich_pata_100: 3 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x06, /* mwdma1-2 */ - .udma_mask = ATA_UDMA5, /* udma0-5 */ - .port_ops = &ich_pata_ops, - }, - - /* ich_pata_133: 4 ICH with full UDMA6 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, - .pio_mask = 0x1f, /* pio 0-4 */ - .mwdma_mask = 0x06, /* Check: maybe 0x07 */ - .udma_mask = ATA_UDMA6, /* UDMA133 */ - .port_ops = &ich_pata_ops, - }, - - /* ich5_sata: 5 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | - PIIX_FLAG_IGNORE_PCS, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* i6300esb_sata: 6 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* ich6_sata: 7 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* ich6_sata_ahci: 8 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | - PIIX_FLAG_AHCI, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* ich6m_sata_ahci: 9 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | - PIIX_FLAG_AHCI, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* ich7m_sata_ahci: 10 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | - PIIX_FLAG_AHCI, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - - /* ich8_sata_ahci: 11 */ - { - .sht = &piix_sht, - .flags = ATA_FLAG_SATA | - PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | - PIIX_FLAG_AHCI, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &piix_sata_ops, - }, - + [ich6m_sata] = &ich6m_map_db, + [ich8_sata] = &ich8_map_db, + [ich8_2port_sata] = &ich8_2port_map_db, + [ich8m_apple_sata] = &ich8m_apple_map_db, + [tolapai_sata] = &tolapai_map_db, + [ich8_sata_snb] = &ich8_map_db, + [ich8_2port_sata_snb] = &ich8_2port_map_db, + [ich8_2port_sata_byt] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -593,195 +461,104 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 
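Taken together, the map_db tables above reduce SATA port routing to a table lookup: the low bits of the MAP value read from the ICH5_PMR config register select one four-slot row, and piix_init_sata_map() below walks that row to configure the two ports. A minimal standalone sketch of the lookup follows; the slot codes and the sample value are illustrative stand-ins, not the driver's definitions.

	#include <stdio.h>

	/* Illustrative stand-ins for the driver's P0..P3/IDE/NA/RV slot codes. */
	enum { P0 = 0, P1 = 1, P2 = 2, P3 = 3, IDE = -2, NA = -3, RV = -4 };

	struct map_db {
		unsigned mask;		/* MAP register bits that matter */
		int map[4][4];		/* [MAP value] -> { PM, PS, SM, SS } */
	};

	/* Rough copy of ich6m_map_db above. */
	static const struct map_db ich6m = {
		.mask = 0x3,
		.map = {
			{ P0,  P2,  NA,  NA  },	/* 00b */
			{ IDE, IDE, P1,  P3  },	/* 01b */
			{ P0,  P2,  IDE, IDE },	/* 10b */
			{ RV,  RV,  RV,  RV  },	/* 11b: reserved */
		},
	};

	int main(void)
	{
		unsigned map_value = 0x2;	/* pretend ICH5_PMR read back 10b */
		const int *row = ich6m.map[map_value & ich6m.mask];
		int i;

		/* row[0..3] give the roles of the four PM/PS/SM/SS slots */
		for (i = 0; i < 4; i++)
			printf("slot %d -> code %d\n", i, row[i]);
		return 0;
	}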
MODULE_VERSION(DRV_VERSION); -static int force_pcs = 0; -module_param(force_pcs, int, 0444); -MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " - "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; -/** - * piix_pata_cbl_detect - Probe host controller cable detect info - * @ap: Port for which cable detect info is desired - * - * Read 80c cable indicator from ATA PCI device's PCI config - * register. This register is normally set by firmware (BIOS). - * - * LOCKING: - * None (inherited from caller). +/* + * List of laptops that use short cables rather than 80 wire */ -static void ich_pata_cbl_detect(struct ata_port *ap) -{ - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 tmp, mask; - - /* no 80c support in host controller? */ - if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0) - goto cbl40; - - /* check BIOS cable detect results */ - mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; - pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); - if ((tmp & mask) == 0) - goto cbl40; - - ap->cbl = ATA_CBL_PATA80; - return; - -cbl40: - ap->cbl = ATA_CBL_PATA40; -} +static const struct ich_laptop ich_laptop[] = { + /* devid, subvendor, subdev */ + { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ + { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ + { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ + { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ + { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ + { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ + { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ + { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ + { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ + { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ + { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ + { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ + { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ + { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ + /* end marker */ + { 0, } +}; -/** - * piix_pata_prereset - prereset for PATA host controller - * @ap: Target port - * - * - * LOCKING: - * None (inherited from caller). - */ -static int piix_pata_prereset(struct ata_port *ap) +static int piix_port_start(struct ata_port *ap) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + if (!(ap->flags & PIIX_FLAG_PIO16)) + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; - if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) - return -ENOENT; - - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -static void piix_pata_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL, - ata_std_postreset); + return ata_bmdma_port_start(ap); } - /** - * ich_pata_prereset - prereset for PATA host controller - * @ap: Target port + * ich_pata_cable_detect - Probe host controller cable detect info + * @ap: Port for which cable detect info is desired * + * Read 80c cable indicator from ATA PCI device's PCI config + * register. This register is normally set by firmware (BIOS). * * LOCKING: * None (inherited from caller). */ -static int ich_pata_prereset(struct ata_port *ap) -{ - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - - if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) { - ata_port_printk(ap, KERN_INFO, "port disabled. 
ignoring.\n"); - ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; - return 0; - } - - ich_pata_cbl_detect(ap); - - return ata_std_prereset(ap); -} - -static void ich_pata_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, ich_pata_prereset, ata_std_softreset, NULL, - ata_std_postreset); -} -/** - * piix_sata_present_mask - determine present mask for SATA host controller - * @ap: Target port - * - * Reads SATA PCI device's PCI config register Port Configuration - * and Status (PCS) to determine port and device availability. - * - * LOCKING: - * None (inherited from caller). - * - * RETURNS: - * determined present_mask - */ -static unsigned int piix_sata_present_mask(struct ata_port *ap) +static int ich_pata_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct piix_host_priv *hpriv = ap->host->private_data; - const unsigned int *map = hpriv->map; - int base = 2 * ap->port_no; - unsigned int present_mask = 0; - int port, i; - u16 pcs; + const struct ich_laptop *lap = &ich_laptop[0]; + u8 mask; - pci_read_config_word(pdev, ICH5_PCS, &pcs); - DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); + /* Check for specials */ + while (lap->device) { + if (lap->device == pdev->device && + lap->subvendor == pdev->subsystem_vendor && + lap->subdevice == pdev->subsystem_device) + return ATA_CBL_PATA40_SHORT; - for (i = 0; i < 2; i++) { - port = map[base + i]; - if (port < 0) - continue; - if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || - (pcs & 1 << (hpriv->map_db->present_shift + port))) - present_mask |= 1 << i; + lap++; } - DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", - ap->id, pcs, present_mask); - - return present_mask; + /* check BIOS cable detect results */ + mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; + if ((hpriv->saved_iocfg & mask) == 0) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; } /** - * piix_sata_softreset - reset SATA host port via ATA SRST - * @ap: port to reset - * @classes: resulting classes of attached devices - * - * Reset SATA host port via ATA SRST. On controllers with - * reliable PCS present bits, the bits are used to determine - * device presence. + * piix_pata_prereset - prereset for PATA host controller + * @link: Target link + * @deadline: deadline jiffies for the operation * * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno otherwise. + * None (inherited from caller). */ -static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes) +static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) { - unsigned int present_mask; - int i, rc; - - present_mask = piix_sata_present_mask(ap); - - rc = ata_std_softreset(ap, classes); - if (rc) - return rc; - - for (i = 0; i < ATA_MAX_DEVICES; i++) { - if (!(present_mask & (1 << i))) - classes[i] = ATA_DEV_NONE; - } - - return 0; -} + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); -static void piix_sata_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, - ata_std_postreset); + if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) + return -ENOENT; + return ata_sff_prereset(link, deadline); } -/** - * piix_set_piomode - Initialize host controller PATA PIO timings - * @ap: Port whose timings we are configuring - * @adev: um - * - * Set PIO mode for device, in host controller PCI config space. - * - * LOCKING: - * None (inherited from caller). 
- */ +static DEFINE_SPINLOCK(piix_lock); -static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) +static void piix_set_timings(struct ata_port *ap, struct ata_device *adev, + u8 pio) { - unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned long flags; unsigned int is_slave = (adev->devno != 0); unsigned int master_port= ap->port_no ? 0x42 : 0x40; unsigned int slave_port = 0x44; @@ -806,30 +583,47 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) control |= 1; /* TIME1 enable */ if (ata_pio_need_iordy(adev)) control |= 2; /* IE enable */ - /* Intel specifies that the PPE functionality is for disk only */ if (adev->class == ATA_DEV_ATA) control |= 4; /* PPE enable */ + /* + * If the drive MWDMA is faster than it can do PIO then + * we must force PIO into PIO0 + */ + if (adev->pio_mode < XFER_PIO_0 + pio) + /* Enable DMA timing only */ + control |= 8; /* PIO cycles in PIO0 */ + spin_lock_irqsave(&piix_lock, flags); + + /* PIO configuration clears DTE unconditionally. It will be + * programmed in set_dmamode which is guaranteed to be called + * after set_piomode if any DMA mode is available. + */ pci_read_config_word(dev, master_port, &master_data); if (is_slave) { - /* Enable SITRE (seperate slave timing register) */ - master_data |= 0x4000; + /* clear TIME1|IE1|PPE1|DTE1 */ + master_data &= 0xff0f; /* enable PPE1, IE1 and TIME1 as needed */ master_data |= (control << 4); pci_read_config_byte(dev, slave_port, &slave_data); slave_data &= (ap->port_no ? 0x0f : 0xf0); /* Load the timing nibble for this slave */ - slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); + slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) + << (ap->port_no ? 4 : 0); } else { - /* Master keeps the bits in a different format */ - master_data &= 0xccf8; + /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */ + master_data &= 0xccf0; /* Enable PPE, IE and TIME as appropriate */ master_data |= control; + /* load ISP and RCT */ master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } + + /* Enable SITRE (separate slave timing register) */ + master_data |= 0x4000; pci_write_config_word(dev, master_port, master_data); if (is_slave) pci_write_config_byte(dev, slave_port, slave_data); @@ -842,13 +636,30 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); pci_write_config_byte(dev, 0x48, udma_enable); } + + spin_unlock_irqrestore(&piix_lock, flags); +} + +/** + * piix_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: Drive in question + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0); } /** * do_pata_set_dmamode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Drive in question - * @udma: udma mode, 0 - 6 * @isich: set if the chip is an ICH device * * Set UDMA mode for device, in host controller PCI config space. @@ -857,33 +668,26 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) * None (inherited from caller). 
*/ -static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich) +static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) { struct pci_dev *dev = to_pci_dev(ap->host->dev); - u8 master_port = ap->port_no ? 0x42 : 0x40; - u16 master_data; + unsigned long flags; u8 speed = adev->dma_mode; int devid = adev->devno + 2 * ap->port_no; - u8 udma_enable; - - static const /* ISP RTC */ - u8 timings[][2] = { { 0, 0 }, - { 0, 0 }, - { 1, 0 }, - { 2, 1 }, - { 2, 3 }, }; - - pci_read_config_word(dev, master_port, &master_data); - pci_read_config_byte(dev, 0x48, &udma_enable); + u8 udma_enable = 0; if (speed >= XFER_UDMA_0) { - unsigned int udma = adev->dma_mode - XFER_UDMA_0; + unsigned int udma = speed - XFER_UDMA_0; u16 udma_timing; u16 ideconf; int u_clock, u_speed; + spin_lock_irqsave(&piix_lock, flags); + + pci_read_config_byte(dev, 0x48, &udma_enable); + /* - * UDMA is handled by a combination of clock switching and + * UDMA is handled by a combination of clock switching and * selection of dividers * * Handy rule: Odd modes are UDMATIMx 01, even are 02 @@ -914,51 +718,21 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i performance (WR_PingPong_En) */ pci_write_config_word(dev, 0x54, ideconf); } + + pci_write_config_byte(dev, 0x48, udma_enable); + + spin_unlock_irqrestore(&piix_lock, flags); } else { - /* - * MWDMA is driven by the PIO timings. We must also enable - * IORDY unconditionally along with TIME1. PPE has already - * been set when the PIO timing was set. - */ - unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; - unsigned int control; - u8 slave_data; + /* MWDMA is driven by the PIO timings. */ + unsigned int mwdma = speed - XFER_MW_DMA_0; const unsigned int needed_pio[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 }; int pio = needed_pio[mwdma] - XFER_PIO_0; - control = 3; /* IORDY|TIME1 */ - - /* If the drive MWDMA is faster than it can do PIO then - we must force PIO into PIO0 */ - - if (adev->pio_mode < needed_pio[mwdma]) - /* Enable DMA timing only */ - control |= 8; /* PIO cycles in PIO0 */ - - if (adev->devno) { /* Slave */ - master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ - master_data |= control << 4; - pci_read_config_byte(dev, 0x44, &slave_data); - slave_data &= (0x0F + 0xE1 * ap->port_no); - /* Load the matching timing */ - slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); - pci_write_config_byte(dev, 0x44, slave_data); - } else { /* Master */ - master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY - and master timing bits */ - master_data |= control; - master_data |= - (timings[pio][0] << 12) | - (timings[pio][1] << 8); - } - udma_enable &= ~(1 << devid); - pci_write_config_word(dev, master_port, master_data); + /* XFER_PIO_0 is never used currently */ + piix_set_timings(ap, adev, pio); } - /* Don't scribble on 0x48 if the controller does not support UDMA */ - if (ap->udma_mask) - pci_write_config_byte(dev, 0x48, udma_enable); } /** @@ -972,7 +746,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i * None (inherited from caller). */ -static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) +static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) { do_pata_set_dmamode(ap, adev, 0); } @@ -988,11 +762,517 @@ static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) * None (inherited from caller). 
 */

-static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 {
 	do_pata_set_dmamode(ap, adev, 1);
 }

+/*
+ * Serial ATA Index/Data Pair Superset Registers access
+ *
+ * Beginning from ICH8, there's a sane way to access SCRs using an
+ * index/data register pair located at BAR5, which means that we have
+ * separate SCRs for master and slave.  This is handled using the
+ * libata slave_link facility.
+ */
+static const int piix_sidx_map[] = {
+	[SCR_STATUS]	= 0,
+	[SCR_ERROR]	= 2,
+	[SCR_CONTROL]	= 1,
+};
+
+static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
+{
+	struct ata_port *ap = link->ap;
+	struct piix_host_priv *hpriv = ap->host->private_data;
+
+	iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
+		  hpriv->sidpr + PIIX_SIDPR_IDX);
+}
+
+static int piix_sidpr_scr_read(struct ata_link *link,
+			       unsigned int reg, u32 *val)
+{
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	piix_sidpr_sel(link, reg);
+	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+	return 0;
+}
+
+static int piix_sidpr_scr_write(struct ata_link *link,
+				unsigned int reg, u32 val)
+{
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	piix_sidpr_sel(link, reg);
+	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+	return 0;
+}
+
+static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			      unsigned hints)
+{
+	return sata_link_scr_lpm(link, policy, false);
+}
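The SIDPR scheme above is a classic index/data pair: write a selector encoding (port, PMP link, SCR number) into the index register, then read or write the data register. The sketch below emulates that latching behaviour in plain C so the pattern can be run standalone; the SIDPR_IDX/SIDPR_DATA offsets and the fake register file are assumptions for illustration, not values from a datasheet.

	#include <stdint.h>
	#include <stdio.h>

	#define SIDPR_IDX	0x00	/* index register offset in BAR5 (assumed) */
	#define SIDPR_DATA	0x04	/* data register offset in BAR5 (assumed) */

	/* Fake controller state: the latched selector plus one 3-entry SCR
	 * file per (port, device) pair, mirroring the master/slave split
	 * described above. */
	static uint32_t latched_idx;
	static uint32_t scr_file[4][3];

	static void bar5_write(unsigned off, uint32_t val)
	{
		if (off == SIDPR_IDX)
			latched_idx = val;
		else
			scr_file[latched_idx >> 8][latched_idx & 0xff] = val;
	}

	static uint32_t bar5_read(unsigned off)
	{
		if (off == SIDPR_IDX)
			return latched_idx;
		return scr_file[latched_idx >> 8][latched_idx & 0xff];
	}

	/* The driver's access pattern: select first, then touch the data reg. */
	static uint32_t sidpr_scr_read(unsigned port, unsigned pmp, unsigned scr)
	{
		bar5_write(SIDPR_IDX, ((port * 2 + pmp) << 8) | scr);
		return bar5_read(SIDPR_DATA);
	}

	int main(void)
	{
		scr_file[1][0] = 0x123;		/* port 0, slave link, SStatus */
		printf("SStatus = %#x\n", sidpr_scr_read(0, 1, 0));
		return 0;
	}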
+
+static bool piix_irq_check(struct ata_port *ap)
+{
+	if (unlikely(!ap->ioaddr.bmdma_addr))
+		return false;
+
+	return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int piix_broken_suspend(void)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+			},
+		},
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
+			},
+		},
+		{
+			.ident = "TECRA M4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
+			},
+		},
+		{
+			.ident = "TECRA M4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
+			},
+		},
+		{
+			.ident = "TECRA M5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+			},
+		},
+		{
+			.ident = "TECRA M6",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
+			},
+		},
+		{
+			.ident = "TECRA M7",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
+			},
+		},
+		{
+			.ident = "TECRA A8",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
+			},
+		},
+		{
+			.ident = "Satellite R20",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
+			},
+		},
+		{
+			.ident = "Satellite R25",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
+			},
+		},
+		{
+			.ident = "Satellite Pro U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
+			},
+		},
+		{
+			.ident = "Satellite U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+			},
+		},
+		{
+			.ident = "SATELLITE U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
+			},
+		},
+		{
+			.ident = "Satellite Pro A120",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+			},
+		},
+		{
+			.ident = "Portege M500",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+			},
+		},
+		{
+			.ident = "VGN-BX297XP",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
+			},
+		},

+		{ }	/* terminate list */
+	};
+	static const char *oemstrs[] = {
+		"Tecra M3,",
+	};
+	int i;
+
+	if (dmi_check_system(sysids))
+		return 1;
+
+	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+			return 1;
+
+	/* TECRA M4 sometimes forgets its identity and reports bogus
+	 * DMI information.  As the bogus information is a bit
+	 * generic, match as many entries as possible.  This manual
+	 * matching is necessary because dmi_system_id.matches is
+	 * limited to four entries.
+	 */
+	if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_PRODUCT_NAME, "000000") &&
+	    dmi_match(DMI_PRODUCT_VERSION, "000000") &&
+	    dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
+	    dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_BOARD_NAME, "Portable PC") &&
+	    dmi_match(DMI_BOARD_VERSION, "Version A0"))
+		return 1;
+
+	return 0;
+}
+
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	/* Some braindamaged ACPI suspend implementations expect the
+	 * controller to be awake on entry; otherwise, it burns cpu
+	 * cycles and power trying to do something to the sleeping
+	 * beauty.
+	 */
+	if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
+		pci_save_state(pdev);
+
+		/* mark its power state as "unknown", since we don't
+		 * know if e.g. the BIOS will change its device state
+		 * when we suspend.
+		 */
+		if (pdev->current_state == PCI_D0)
+			pdev->current_state = PCI_UNKNOWN;
+
+		/* tell resume that it's waking up from broken suspend */
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags |= PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+	} else
+		ata_pci_device_do_suspend(pdev, mesg);
+
+	return 0;
+}
+
+static int piix_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int rc;
+
+	if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		pci_set_power_state(pdev, PCI_D0);
+		pci_restore_state(pdev);
+
+		/* PCI device wasn't disabled during suspend.  Use
+		 * pci_reenable_device() to avoid affecting the enable
+		 * count.
+ */ + rc = pci_reenable_device(pdev); + if (rc) + dev_err(&pdev->dev, + "failed to enable device after resume (%d)\n", + rc); + } else + rc = ata_pci_device_do_resume(pdev); + + if (rc == 0) + ata_host_resume(host); + + return rc; +} +#endif + +static u8 piix_vmw_bmdma_status(struct ata_port *ap) +{ + return ata_bmdma_status(ap) & ~ATA_DMA_ERR; +} + +static struct scsi_host_template piix_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations piix_sata_ops = { + .inherits = &ata_bmdma32_port_ops, + .sff_irq_check = piix_irq_check, + .port_start = piix_port_start, +}; + +static struct ata_port_operations piix_pata_ops = { + .inherits = &piix_sata_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = piix_set_piomode, + .set_dmamode = piix_set_dmamode, + .prereset = piix_pata_prereset, +}; + +static struct ata_port_operations piix_vmw_ops = { + .inherits = &piix_pata_ops, + .bmdma_status = piix_vmw_bmdma_status, +}; + +static struct ata_port_operations ich_pata_ops = { + .inherits = &piix_pata_ops, + .cable_detect = ich_pata_cable_detect, + .set_dmamode = ich_set_dmamode, +}; + +static struct device_attribute *piix_sidpr_shost_attrs[] = { + &dev_attr_link_power_management_policy, + NULL +}; + +static struct scsi_host_template piix_sidpr_sht = { + ATA_BMDMA_SHT(DRV_NAME), + .shost_attrs = piix_sidpr_shost_attrs, +}; + +static struct ata_port_operations piix_sidpr_sata_ops = { + .inherits = &piix_sata_ops, + .hardreset = sata_std_hardreset, + .scr_read = piix_sidpr_scr_read, + .scr_write = piix_sidpr_scr_write, + .set_lpm = piix_sidpr_set_lpm, +}; + +static struct ata_port_info piix_port_info[] = { + [piix_pata_mwdma] = /* PIIX3 MWDMA only */ + { + .flags = PIIX_PATA_FLAGS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */ + .port_ops = &piix_pata_ops, + }, + + [piix_pata_33] = /* PIIX4 at 33MHz */ + { + .flags = PIIX_PATA_FLAGS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? 
CHECK 0 should be ok but slow */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &piix_pata_ops,
+	},
+
+	[ich_pata_33] =		/* ICH0 - ICH at 33 MHz */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_66] =		/* ICH controllers up to 66MHz */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
+		.udma_mask	= ATA_UDMA4,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_100] =
+	{
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_100_nomwdma1] =
+	{
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2_ONLY,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich5_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich6_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich6m_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[tolapai_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8m_apple_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[piix_pata_vmw] =
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &piix_vmw_ops,
+	},
+
+	/*
+	 * some Sandybridge chipsets have broken 32-bit PIO mode up to now,
+	 * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+	 */
+	[ich8_sata_snb] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata_snb] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
+					| PIIX_FLAG_PIO16,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata_byt] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+};
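The PIIX_FLAG_PIO16 entries above work together with piix_port_start() earlier in this file: every port gets 32-bit PIO by default, and the flag withholds it on the chipsets hit by the bugzilla 40592 erratum. Below is a standalone sketch of that gating; the flag bit values are invented for illustration, not the libata definitions.

	#include <stdio.h>

	#define PIIX_FLAG_PIO16		(1u << 30)	/* illustrative bit value */
	#define ATA_PFLAG_PIO32		(1u << 0)	/* illustrative bit value */
	#define ATA_PFLAG_PIO32CHANGE	(1u << 1)	/* illustrative bit value */

	struct port { unsigned flags, pflags; };

	/* Mirrors the shape of piix_port_start(): 32-bit PIO is the default
	 * and PIIX_FLAG_PIO16 opts a chipset entry out of it. */
	static void port_start(struct port *ap)
	{
		if (!(ap->flags & PIIX_FLAG_PIO16))
			ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	}

	int main(void)
	{
		struct port snb  = { .flags = PIIX_FLAG_PIO16 };	/* e.g. ich8_sata_snb */
		struct port ich8 = { .flags = 0 };			/* e.g. ich8_sata */

		port_start(&snb);
		port_start(&ich8);
		printf("snb pflags=%#x, ich8 pflags=%#x\n", snb.pflags, ich8.pflags);
		return 0;
	}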
 #define AHCI_PCI_BAR 5
 #define AHCI_GLOBAL_CTL 0x04
 #define AHCI_ENABLE (1 << 31)
@@ -1014,12 +1294,12 @@ static int piix_disable_ahci(struct pci_dev *pdev)
 	if (!mmio)
 		return -ENOMEM;

-	tmp = readl(mmio + AHCI_GLOBAL_CTL);
+	tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
 	if (tmp & AHCI_ENABLE) {
 		tmp &= ~AHCI_ENABLE;
-		writel(tmp, mmio + AHCI_GLOBAL_CTL);
+		iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);

-		tmp = readl(mmio + AHCI_GLOBAL_CTL);
+		tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
 		if (tmp & AHCI_ENABLE)
 			rc = -EIO;
 	}
@@ -1036,37 +1316,36 @@ static int piix_disable_ahci(struct pci_dev *pdev)
 * they are found return an error code so we can turn off DMA
 */
-static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
+static int piix_check_450nx_errata(struct pci_dev *ata_dev)
 {
 	struct pci_dev *pdev = NULL;
 	u16 cfg;
-	u8 rev;
 	int no_piix_dma = 0;

-	while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
-	{
+	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
 		/* Look for 450NX PXB. Check for problem configurations
 		   A PCI quirk checks bit 6 already */
-		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 		pci_read_config_word(pdev, 0x41, &cfg);
 		/* Only on the original revision: IDE DMA can hang */
-		if (rev == 0x00)
+		if (pdev->revision == 0x00)
 			no_piix_dma = 1;
 		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
-		else if (cfg & (1<<14) && rev < 5)
+		else if (cfg & (1<<14) && pdev->revision < 5)
			no_piix_dma = 2;
 	}
 	if (no_piix_dma)
-		dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
-	if (no_piix_dma == 2)
-		dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
+		dev_warn(&ata_dev->dev,
+			 "450NX errata present, disabling IDE DMA%s\n",
+			 no_piix_dma == 2 ? 
" - a BIOS update may resolve this" + : ""); + return no_piix_dma; } -static void __devinit piix_init_pcs(struct pci_dev *pdev, - struct ata_port_info *pinfo, - const struct piix_map_db *map_db) +static void piix_init_pcs(struct ata_host *host, + const struct piix_map_db *map_db) { + struct pci_dev *pdev = to_pci_dev(host->dev); u16 pcs, new_pcs; pci_read_config_word(pdev, ICH5_PCS, &pcs); @@ -1078,68 +1357,278 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev, pci_write_config_word(pdev, ICH5_PCS, new_pcs); msleep(150); } - - if (force_pcs == 1) { - dev_printk(KERN_INFO, &pdev->dev, - "force ignoring PCS (0x%x)\n", new_pcs); - pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS; - pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS; - } else if (force_pcs == 2) { - dev_printk(KERN_INFO, &pdev->dev, - "force honoring PCS (0x%x)\n", new_pcs); - pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS; - pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS; - } } -static void __devinit piix_init_sata_map(struct pci_dev *pdev, - struct ata_port_info *pinfo, - const struct piix_map_db *map_db) +static const int *piix_init_sata_map(struct pci_dev *pdev, + struct ata_port_info *pinfo, + const struct piix_map_db *map_db) { - struct piix_host_priv *hpriv = pinfo[0].private_data; - const unsigned int *map; + const int *map; int i, invalid_map = 0; u8 map_value; + char buf[32]; + char *p = buf, *end = buf + sizeof(buf); pci_read_config_byte(pdev, ICH5_PMR, &map_value); map = map_db->map[map_value & map_db->mask]; - dev_printk(KERN_INFO, &pdev->dev, "MAP ["); for (i = 0; i < 4; i++) { switch (map[i]) { case RV: invalid_map = 1; - printk(" XX"); + p += scnprintf(p, end - p, " XX"); break; case NA: - printk(" --"); + p += scnprintf(p, end - p, " --"); break; case IDE: WARN_ON((i & 1) || map[i + 1] != IDE); pinfo[i / 2] = piix_port_info[ich_pata_100]; - pinfo[i / 2].private_data = hpriv; i++; - printk(" IDE IDE"); + p += scnprintf(p, end - p, " IDE IDE"); break; default: - printk(" P%d", map[i]); + p += scnprintf(p, end - p, " P%d", map[i]); if (i & 1) pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS; break; } } - printk(" ]\n"); + dev_info(&pdev->dev, "MAP [%s ]\n", buf); if (invalid_map) - dev_printk(KERN_ERR, &pdev->dev, - "invalid MAP value %u\n", map_value); + dev_err(&pdev->dev, "invalid MAP value %u\n", map_value); + + return map; +} + +static bool piix_no_sidpr(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + + /* + * Samsung DB-P70 only has three ATA ports exposed and + * curiously the unconnected first port reports link online + * while not responding to SRST protocol causing excessive + * detection delay. + * + * Unfortunately, the system doesn't carry enough DMI + * information to identify the machine but does have subsystem + * vendor and device set. As it's unclear whether the + * subsystem vendor/device is used only for this specific + * board, the port can't be disabled solely with the + * information; however, turning off SIDPR access works around + * the problem. Turn it off. + * + * This problem is reported in bnc#441240. 
+ * + * https://bugzilla.novell.com/show_bug.cgi?id=441420 + */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 && + pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && + pdev->subsystem_device == 0xb049) { + dev_warn(host->dev, + "Samsung DB-P70 detected, disabling SIDPR\n"); + return true; + } - hpriv->map = map; - hpriv->map_db = map_db; + return false; +} + +static int piix_init_sidpr(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct piix_host_priv *hpriv = host->private_data; + struct ata_link *link0 = &host->ports[0]->link; + u32 scontrol; + int i, rc; + + /* check for availability */ + for (i = 0; i < 4; i++) + if (hpriv->map[i] == IDE) + return 0; + + /* is it blacklisted? */ + if (piix_no_sidpr(host)) + return 0; + + if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR)) + return 0; + + if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 || + pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN) + return 0; + + if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME)) + return 0; + + hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR]; + + /* SCR access via SIDPR doesn't work on some configurations. + * Give it a test drive by inhibiting power save modes which + * we'll do anyway. + */ + piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol); + + /* if IPM is already 3, SCR access is probably working. Don't + * un-inhibit power save modes as BIOS might have inhibited + * them for a reason. + */ + if ((scontrol & 0xf00) != 0x300) { + scontrol |= 0x300; + piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol); + piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol); + + if ((scontrol & 0xf00) != 0x300) { + dev_info(host->dev, + "SCR access via SIDPR is available but doesn't work\n"); + return 0; + } + } + + /* okay, SCRs available, set ops and ask libata for slave_link */ + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; + + ap->ops = &piix_sidpr_sata_ops; + + if (ap->flags & ATA_FLAG_SLAVE_POSS) { + rc = ata_slave_link_init(ap); + if (rc) + return rc; + } + } + + return 0; +} + +static void piix_iocfg_bit18_quirk(struct ata_host *host) +{ + static const struct dmi_system_id sysids[] = { + { + /* Clevo M570U sets IOCFG bit 18 if the cdrom + * isn't used to boot the system which + * disables the channel. + */ + .ident = "M570U", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."), + DMI_MATCH(DMI_PRODUCT_NAME, "M570U"), + }, + }, + + { } /* terminate list */ + }; + struct pci_dev *pdev = to_pci_dev(host->dev); + struct piix_host_priv *hpriv = host->private_data; + + if (!dmi_check_system(sysids)) + return; + + /* The datasheet says that bit 18 is NOOP but certain systems + * seem to use it to disable a channel. Clear the bit on the + * affected systems. 
+ */ + if (hpriv->saved_iocfg & (1 << 18)) { + dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n"); + pci_write_config_dword(pdev, PIIX_IOCFG, + hpriv->saved_iocfg & ~(1 << 18)); + } +} + +static bool piix_broken_system_poweroff(struct pci_dev *pdev) +{ + static const struct dmi_system_id broken_systems[] = { + { + .ident = "HP Compaq 2510p", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x1FUL, + }, + { + .ident = "HP Compaq nc6000", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x1FUL, + }, + + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(broken_systems); + + if (dmi) { + unsigned long slot = (unsigned long)dmi->driver_data; + /* apply the quirk only to on-board controllers */ + return slot == PCI_SLOT(pdev->devfn); + } + + return false; +} + +static int prefer_ms_hyperv = 1; +module_param(prefer_ms_hyperv, int, 0); +MODULE_PARM_DESC(prefer_ms_hyperv, + "Prefer Hyper-V paravirtualization drivers instead of ATA, " + "0 - Use ATA drivers, " + "1 (Default) - Use the paravirtualization drivers."); + +static void piix_ignore_devices_quirk(struct ata_host *host) +{ +#if IS_ENABLED(CONFIG_HYPERV_STORAGE) + static const struct dmi_system_id ignore_hyperv[] = { + { + /* On Hyper-V hypervisors the disks are exposed on + * both the emulated SATA controller and on the + * paravirtualised drivers. The CD/DVD devices + * are only exposed on the emulated controller. + * Request we ignore ATA devices on this host. + */ + .ident = "Hyper-V Virtual Machine", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), + }, + }, + { } /* terminate list */ + }; + static const struct dmi_system_id allow_virtual_pc[] = { + { + /* In MS Virtual PC guests the DMI ident is nearly + * identical to a Hyper-V guest. One difference is the + * product version which is used here to identify + * a Virtual PC guest. This entry allows ata_piix to + * drive the emulated hardware. + */ + .ident = "MS Virtual PC 2007", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), + DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"), + }, + }, + { } /* terminate list */ + }; + const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv); + const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc); + + if (ignore && !allow && prefer_ms_hyperv) { + host->flags |= ATA_HOST_IGNORE_ATA; + dev_info(host->dev, "%s detected, ATA device ignore set\n", + ignore->ident); + } +#endif } /** @@ -1157,51 +1646,85 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev, * Zero on success, or -ERRNO value. 
*/ -static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; + struct device *dev = &pdev->dev; struct ata_port_info port_info[2]; - struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; - struct piix_host_priv *hpriv; + const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] }; + struct scsi_host_template *sht = &piix_sht; unsigned long port_flags; + struct ata_host *host; + struct piix_host_priv *hpriv; + int rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - /* no hotplugging support (FIXME) */ - if (!in_module_init) + /* no hotplugging support for later devices (FIXME) */ + if (!in_module_init && ent->driver_data >= ich5_sata) return -ENODEV; - hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) - return -ENOMEM; + if (piix_broken_system_poweroff(pdev)) { + piix_port_info[ent->driver_data].flags |= + ATA_FLAG_NO_POWEROFF_SPINDOWN | + ATA_FLAG_NO_HIBERNATE_SPINDOWN; + dev_info(&pdev->dev, "quirky BIOS, skipping spindown " + "on poweroff and hibernation\n"); + } port_info[0] = piix_port_info[ent->driver_data]; port_info[1] = piix_port_info[ent->driver_data]; - port_info[0].private_data = hpriv; - port_info[1].private_data = hpriv; port_flags = port_info[0].flags; - if (port_flags & PIIX_FLAG_AHCI) { - u8 tmp; - pci_read_config_byte(pdev, PIIX_SCC, &tmp); - if (tmp == PIIX_AHCI_DEVICE) { - int rc = piix_disable_ahci(pdev); - if (rc) - return rc; - } + /* enable device and prepare host */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + + /* Save IOCFG, this will be used for cable detection, quirk + * detection and restoration on detach. This is necessary + * because some ACPI implementations mess up cable related + * bits on _STM. Reported on kernel bz#11879. + */ + pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg); + + /* ICH6R may be driven by either ata_piix or ahci driver + * regardless of BIOS configuration. Make sure AHCI mode is + * off. + */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) { + rc = piix_disable_ahci(pdev); + if (rc) + return rc; } - /* Initialize SATA map */ + /* SATA map init can change port_info, do it before prepping host */ + if (port_flags & ATA_FLAG_SATA) + hpriv->map = piix_init_sata_map(pdev, port_info, + piix_map_db_table[ent->driver_data]); + + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + return rc; + host->private_data = hpriv; + + /* initialize controller */ if (port_flags & ATA_FLAG_SATA) { - piix_init_sata_map(pdev, port_info, - piix_map_db_table[ent->driver_data]); - piix_init_pcs(pdev, port_info, - piix_map_db_table[ent->driver_data]); + piix_init_pcs(host, piix_map_db_table[ent->driver_data]); + rc = piix_init_sidpr(host); + if (rc) + return rc; + if (host->ports[0]->ops == &piix_sidpr_sata_ops) + sht = &piix_sidpr_sht; } + /* apply IOCFG bit18 quirk */ + piix_iocfg_bit18_quirk(host); + /* On ICH5, some BIOSen disable the interrupt using the * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 
* On ICH6, this bit has the same effect, but only when @@ -1215,23 +1738,41 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* This writes into the master table but it does not really matter for this errata as we will apply it to all the PIIX devices on the board */ - port_info[0].mwdma_mask = 0; - port_info[0].udma_mask = 0; - port_info[1].mwdma_mask = 0; - port_info[1].udma_mask = 0; + host->ports[0]->mwdma_mask = 0; + host->ports[0]->udma_mask = 0; + host->ports[1]->mwdma_mask = 0; + host->ports[1]->udma_mask = 0; } - return ata_pci_init_one(pdev, ppinfo, 2); + host->flags |= ATA_HOST_PARALLEL_SCAN; + + /* Allow hosts to specify device types to ignore when scanning. */ + piix_ignore_devices_quirk(host); + + pci_set_master(pdev); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); } -static void piix_host_stop(struct ata_host *host) +static void piix_remove_one(struct pci_dev *pdev) { + struct ata_host *host = pci_get_drvdata(pdev); struct piix_host_priv *hpriv = host->private_data; - ata_host_stop(host); + pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg); - kfree(hpriv); + ata_pci_remove_one(pdev); } +static struct pci_driver piix_pci_driver = { + .name = DRV_NAME, + .id_table = piix_pci_tbl, + .probe = piix_init_one, + .remove = piix_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = piix_pci_device_suspend, + .resume = piix_pci_device_resume, +#endif +}; + static int __init piix_init(void) { int rc; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c new file mode 100644 index 00000000000..d72ce047030 --- /dev/null +++ b/drivers/ata/libahci.c @@ -0,0 +1,2482 @@ +/* + * libahci.c - Common AHCI SATA low-level routines + * + * Maintained by: Tejun Heo <tj@kernel.org> + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2004-2005 Red Hat, Inc. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <linux/libata.h> +#include "ahci.h" +#include "libata.h" + +static int ahci_skip_host_reset; +int ahci_ignore_sss; +EXPORT_SYMBOL_GPL(ahci_ignore_sss); + +module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); +MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); + +module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); +MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); + +static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); +static ssize_t ahci_led_show(struct ata_port *ap, char *buf); +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size); +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size); + + + +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); +static int ahci_port_start(struct ata_port *ap); +static void ahci_port_stop(struct ata_port *ap); +static void ahci_qc_prep(struct ata_queued_cmd *qc); +static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); +static void ahci_freeze(struct ata_port *ap); +static void ahci_thaw(struct ata_port *ap); +static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep); +static void ahci_enable_fbs(struct ata_port *ap); +static void ahci_disable_fbs(struct ata_port *ap); +static void ahci_pmp_attach(struct ata_port *ap); +static void ahci_pmp_detach(struct ata_port *ap); +static int ahci_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int ahci_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void ahci_postreset(struct ata_link *link, unsigned int *class); +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); +static void ahci_dev_config(struct ata_device *dev); +#ifdef CONFIG_PM +static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); +#endif +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); +static ssize_t ahci_activity_store(struct ata_device *dev, + enum sw_activity val); +static void ahci_init_sw_activity(struct ata_link *link); + +static ssize_t ahci_show_host_caps(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_host_cap2(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_host_version(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_port_cmd(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_read_em_buffer(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_store_em_buffer(struct device *dev, + 
struct device_attribute *attr, + const char *buf, size_t size); +static ssize_t ahci_show_em_supported(struct device *dev, + struct device_attribute *attr, char *buf); + +static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); +static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); +static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); +static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); +static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, + ahci_read_em_buffer, ahci_store_em_buffer); +static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL); + +struct device_attribute *ahci_shost_attrs[] = { + &dev_attr_link_power_management_policy, + &dev_attr_em_message_type, + &dev_attr_em_message, + &dev_attr_ahci_host_caps, + &dev_attr_ahci_host_cap2, + &dev_attr_ahci_host_version, + &dev_attr_ahci_port_cmd, + &dev_attr_em_buffer, + &dev_attr_em_message_supported, + NULL +}; +EXPORT_SYMBOL_GPL(ahci_shost_attrs); + +struct device_attribute *ahci_sdev_attrs[] = { + &dev_attr_sw_activity, + &dev_attr_unload_heads, + NULL +}; +EXPORT_SYMBOL_GPL(ahci_sdev_attrs); + +struct ata_port_operations ahci_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = ahci_pmp_qc_defer, + .qc_prep = ahci_qc_prep, + .qc_issue = ahci_qc_issue, + .qc_fill_rtf = ahci_qc_fill_rtf, + + .freeze = ahci_freeze, + .thaw = ahci_thaw, + .softreset = ahci_softreset, + .hardreset = ahci_hardreset, + .postreset = ahci_postreset, + .pmp_softreset = ahci_softreset, + .error_handler = ahci_error_handler, + .post_internal_cmd = ahci_post_internal_cmd, + .dev_config = ahci_dev_config, + + .scr_read = ahci_scr_read, + .scr_write = ahci_scr_write, + .pmp_attach = ahci_pmp_attach, + .pmp_detach = ahci_pmp_detach, + + .set_lpm = ahci_set_lpm, + .em_show = ahci_led_show, + .em_store = ahci_led_store, + .sw_activity_show = ahci_activity_show, + .sw_activity_store = ahci_activity_store, + .transmit_led_message = ahci_transmit_led_message, +#ifdef CONFIG_PM + .port_suspend = ahci_port_suspend, + .port_resume = ahci_port_resume, +#endif + .port_start = ahci_port_start, + .port_stop = ahci_port_stop, +}; +EXPORT_SYMBOL_GPL(ahci_ops); + +struct ata_port_operations ahci_pmp_retry_srst_ops = { + .inherits = &ahci_ops, + .softreset = ahci_pmp_retry_softreset, +}; +EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops); + +static bool ahci_em_messages __read_mostly = true; +EXPORT_SYMBOL_GPL(ahci_em_messages); +module_param(ahci_em_messages, bool, 0444); +/* add other LED protocol types when they become supported */ +MODULE_PARM_DESC(ahci_em_messages, + "AHCI Enclosure Management Message control (0 = off, 1 = on)"); + +/* device sleep idle timeout in ms */ +static int devslp_idle_timeout __read_mostly = 1000; +module_param(devslp_idle_timeout, int, 0644); +MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout"); + +static void ahci_enable_ahci(void __iomem *mmio) +{ + int i; + u32 tmp; + + /* turn on AHCI_EN */ + tmp = readl(mmio + HOST_CTL); + if (tmp & HOST_AHCI_EN) + return; + + /* Some controllers need AHCI_EN to be written multiple times. + * Try a few times before giving up. 
+ */ + for (i = 0; i < 5; i++) { + tmp |= HOST_AHCI_EN; + writel(tmp, mmio + HOST_CTL); + tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ + if (tmp & HOST_AHCI_EN) + return; + msleep(10); + } + + WARN_ON(1); +} + +static ssize_t ahci_show_host_caps(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + + return sprintf(buf, "%x\n", hpriv->cap); +} + +static ssize_t ahci_show_host_cap2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + + return sprintf(buf, "%x\n", hpriv->cap2); +} + +static ssize_t ahci_show_host_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + + return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION)); +} + +static ssize_t ahci_show_port_cmd(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + void __iomem *port_mmio = ahci_port_base(ap); + + return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); +} + +static ssize_t ahci_read_em_buffer(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *em_mmio = mmio + hpriv->em_loc; + u32 em_ctl, msg; + unsigned long flags; + size_t count; + int i; + + spin_lock_irqsave(ap->lock, flags); + + em_ctl = readl(mmio + HOST_EM_CTL); + if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT || + !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) { + spin_unlock_irqrestore(ap->lock, flags); + return -EINVAL; + } + + if (!(em_ctl & EM_CTL_MR)) { + spin_unlock_irqrestore(ap->lock, flags); + return -EAGAIN; + } + + if (!(em_ctl & EM_CTL_SMB)) + em_mmio += hpriv->em_buf_sz; + + count = hpriv->em_buf_sz; + + /* the count should not be larger than PAGE_SIZE */ + if (count > PAGE_SIZE) { + if (printk_ratelimit()) + ata_port_warn(ap, + "EM read buffer size too large: " + "buffer size %u, page size %lu\n", + hpriv->em_buf_sz, PAGE_SIZE); + count = PAGE_SIZE; + } + + for (i = 0; i < count; i += 4) { + msg = readl(em_mmio + i); + buf[i] = msg & 0xff; + buf[i + 1] = (msg >> 8) & 0xff; + buf[i + 2] = (msg >> 16) & 0xff; + buf[i + 3] = (msg >> 24) & 0xff; + } + + spin_unlock_irqrestore(ap->lock, flags); + + return i; +} + +static ssize_t ahci_store_em_buffer(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *em_mmio = mmio + hpriv->em_loc; + const unsigned char *msg_buf = buf; + u32 em_ctl, msg; + unsigned long flags; + int i; + + /* check size validity */ + if (!(ap->flags & ATA_FLAG_EM) || + !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) || + size % 4 || size > hpriv->em_buf_sz) + return -EINVAL; + + spin_lock_irqsave(ap->lock, 
flags); + + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_TM) { + spin_unlock_irqrestore(ap->lock, flags); + return -EBUSY; + } + + for (i = 0; i < size; i += 4) { + msg = msg_buf[i] | msg_buf[i + 1] << 8 | + msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24; + writel(msg, em_mmio + i); + } + + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); + + spin_unlock_irqrestore(ap->lock, flags); + + return size; +} + +static ssize_t ahci_show_em_supported(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + + em_ctl = readl(mmio + HOST_EM_CTL); + + return sprintf(buf, "%s%s%s%s\n", + em_ctl & EM_CTL_LED ? "led " : "", + em_ctl & EM_CTL_SAFTE ? "saf-te " : "", + em_ctl & EM_CTL_SES ? "ses-2 " : "", + em_ctl & EM_CTL_SGPIO ? "sgpio " : ""); +} + +/** + * ahci_save_initial_config - Save and fixup initial config values + * @dev: target AHCI device + * @hpriv: host private area to store config values + * @force_port_map: force port map to a specified value + * @mask_port_map: mask out particular bits from port map + * + * Some registers containing configuration info might be setup by + * BIOS and might be cleared on reset. This function saves the + * initial values of those registers into @hpriv such that they + * can be restored after controller reset. + * + * If inconsistent, config values are fixed up by this function. + * + * If it is not set already this function sets hpriv->start_engine to + * ahci_start_engine. + * + * LOCKING: + * None. + */ +void ahci_save_initial_config(struct device *dev, + struct ahci_host_priv *hpriv, + unsigned int force_port_map, + unsigned int mask_port_map) +{ + void __iomem *mmio = hpriv->mmio; + u32 cap, cap2, vers, port_map; + int i; + + /* make sure AHCI mode is enabled before accessing CAP */ + ahci_enable_ahci(mmio); + + /* Values prefixed with saved_ are written back to host after + * reset. Values without are used for driver operation. 
+ */ + hpriv->saved_cap = cap = readl(mmio + HOST_CAP); + hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); + + /* CAP2 register is only defined for AHCI 1.2 and later */ + vers = readl(mmio + HOST_VERSION); + if ((vers >> 16) > 1 || + ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200)) + hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2); + else + hpriv->saved_cap2 = cap2 = 0; + + /* some chips have errata preventing 64bit use */ + if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { + dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n"); + cap &= ~HOST_CAP_64; + } + + if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { + dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n"); + cap &= ~HOST_CAP_NCQ; + } + + if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { + dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n"); + cap |= HOST_CAP_NCQ; + } + + if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { + dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n"); + cap &= ~HOST_CAP_PMP; + } + + if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { + dev_info(dev, + "controller can't do SNTF, turning off CAP_SNTF\n"); + cap &= ~HOST_CAP_SNTF; + } + + if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) { + dev_info(dev, + "controller can't do DEVSLP, turning off\n"); + cap2 &= ~HOST_CAP2_SDS; + cap2 &= ~HOST_CAP2_SADM; + } + + if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { + dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); + cap |= HOST_CAP_FBS; + } + + if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) { + dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n"); + cap &= ~HOST_CAP_FBS; + } + + if (force_port_map && port_map != force_port_map) { + dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", + port_map, force_port_map); + port_map = force_port_map; + } + + if (mask_port_map) { + dev_warn(dev, "masking port_map 0x%x -> 0x%x\n", + port_map, + port_map & mask_port_map); + port_map &= mask_port_map; + } + + /* cross check port_map and cap.n_ports */ + if (port_map) { + int map_ports = 0; + + for (i = 0; i < AHCI_MAX_PORTS; i++) + if (port_map & (1 << i)) + map_ports++; + + /* If PI has more ports than n_ports, whine, clear + * port_map and let it be generated from n_ports. + */ + if (map_ports > ahci_nr_ports(cap)) { + dev_warn(dev, + "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n", + port_map, ahci_nr_ports(cap)); + port_map = 0; + } + } + + /* fabricate port_map from cap.nr_ports */ + if (!port_map) { + port_map = (1 << ahci_nr_ports(cap)) - 1; + dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); + + /* write the fixed up value to the PI register */ + hpriv->saved_port_map = port_map; + } + + /* record values to use during operation */ + hpriv->cap = cap; + hpriv->cap2 = cap2; + hpriv->port_map = port_map; + + if (!hpriv->start_engine) + hpriv->start_engine = ahci_start_engine; +} +EXPORT_SYMBOL_GPL(ahci_save_initial_config); + +/** + * ahci_restore_initial_config - Restore initial config + * @host: target ATA host + * + * Restore initial config stored by ahci_save_initial_config(). + * + * LOCKING: + * None. 
+ */ +static void ahci_restore_initial_config(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + + writel(hpriv->saved_cap, mmio + HOST_CAP); + if (hpriv->saved_cap2) + writel(hpriv->saved_cap2, mmio + HOST_CAP2); + writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); + (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ +} + +static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) +{ + static const int offset[] = { + [SCR_STATUS] = PORT_SCR_STAT, + [SCR_CONTROL] = PORT_SCR_CTL, + [SCR_ERROR] = PORT_SCR_ERR, + [SCR_ACTIVE] = PORT_SCR_ACT, + [SCR_NOTIFICATION] = PORT_SCR_NTF, + }; + struct ahci_host_priv *hpriv = ap->host->private_data; + + if (sc_reg < ARRAY_SIZE(offset) && + (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) + return offset[sc_reg]; + return 0; +} + +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + int offset = ahci_scr_offset(link->ap, sc_reg); + + if (offset) { + *val = readl(port_mmio + offset); + return 0; + } + return -EINVAL; +} + +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + int offset = ahci_scr_offset(link->ap, sc_reg); + + if (offset) { + writel(val, port_mmio + offset); + return 0; + } + return -EINVAL; +} + +void ahci_start_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* start DMA */ + tmp = readl(port_mmio + PORT_CMD); + tmp |= PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + readl(port_mmio + PORT_CMD); /* flush */ +} +EXPORT_SYMBOL_GPL(ahci_start_engine); + +int ahci_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* wait for engine to stop. 
This could be as long as 500 msec */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_stop_engine); + +void ahci_start_fis_rx(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + u32 tmp; + + /* set FIS registers */ + if (hpriv->cap & HOST_CAP_64) + writel((pp->cmd_slot_dma >> 16) >> 16, + port_mmio + PORT_LST_ADDR_HI); + writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); + + if (hpriv->cap & HOST_CAP_64) + writel((pp->rx_fis_dma >> 16) >> 16, + port_mmio + PORT_FIS_ADDR_HI); + writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); + + /* enable FIS reception */ + tmp = readl(port_mmio + PORT_CMD); + tmp |= PORT_CMD_FIS_RX; + writel(tmp, port_mmio + PORT_CMD); + + /* flush */ + readl(port_mmio + PORT_CMD); +} +EXPORT_SYMBOL_GPL(ahci_start_fis_rx); + +static int ahci_stop_fis_rx(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* disable FIS reception */ + tmp = readl(port_mmio + PORT_CMD); + tmp &= ~PORT_CMD_FIS_RX; + writel(tmp, port_mmio + PORT_CMD); + + /* wait for completion, spec says 500ms, give it 1000 */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON, + PORT_CMD_FIS_ON, 10, 1000); + if (tmp & PORT_CMD_FIS_ON) + return -EBUSY; + + return 0; +} + +static void ahci_power_up(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd; + + cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; + + /* spin up device */ + if (hpriv->cap & HOST_CAP_SSS) { + cmd |= PORT_CMD_SPIN_UP; + writel(cmd, port_mmio + PORT_CMD); + } + + /* wake up link */ + writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); +} + +static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned int hints) +{ + struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + + if (policy != ATA_LPM_MAX_POWER) { + /* + * Disable interrupts on Phy Ready. This keeps us from + * getting woken up due to spurious phy ready + * interrupts. 
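+		 * The mask (and the link power state) is restored by
+		 * the ATA_LPM_MAX_POWER branch at the end of this
+		 * function.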
+		 */
+		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+		sata_link_scr_lpm(link, policy, false);
+	}
+
+	if (hpriv->cap & HOST_CAP_ALPM) {
+		u32 cmd = readl(port_mmio + PORT_CMD);
+
+		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
+			cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+			cmd |= PORT_CMD_ICC_ACTIVE;
+
+			writel(cmd, port_mmio + PORT_CMD);
+			readl(port_mmio + PORT_CMD);
+
+			/* wait 10ms to be sure we've come out of LPM state */
+			ata_msleep(ap, 10);
+		} else {
+			cmd |= PORT_CMD_ALPE;
+			if (policy == ATA_LPM_MIN_POWER)
+				cmd |= PORT_CMD_ASP;
+
+			/* write out new cmd value */
+			writel(cmd, port_mmio + PORT_CMD);
+		}
+	}
+
+	/* set aggressive device sleep */
+	if ((hpriv->cap2 & HOST_CAP2_SDS) &&
+	    (hpriv->cap2 & HOST_CAP2_SADM) &&
+	    (link->device->flags & ATA_DFLAG_DEVSLP)) {
+		if (policy == ATA_LPM_MIN_POWER)
+			ahci_set_aggressive_devslp(ap, true);
+		else
+			ahci_set_aggressive_devslp(ap, false);
+	}
+
+	if (policy == ATA_LPM_MAX_POWER) {
+		sata_link_scr_lpm(link, policy, false);
+
+		/* turn PHYRDY IRQ back on */
+		pp->intr_mask |= PORT_IRQ_PHYRDY;
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 cmd, scontrol;
+
+	if (!(hpriv->cap & HOST_CAP_SSS))
+		return;
+
+	/* put device into listen mode, first set PxSCTL.DET to 0 */
+	scontrol = readl(port_mmio + PORT_SCR_CTL);
+	scontrol &= ~0xf;
+	writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+	/* then set PxCMD.SUD to 0 */
+	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+	cmd &= ~PORT_CMD_SPIN_UP;
+	writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_link *link;
+	struct ahci_em_priv *emp;
+	ssize_t rc;
+	int i;
+
+	/* enable FIS reception */
+	ahci_start_fis_rx(ap);
+
+	/* enable DMA */
+	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
+		hpriv->start_engine(ap);
+
+	/* turn on LEDs */
+	if (ap->flags & ATA_FLAG_EM) {
+		ata_for_each_link(link, ap, EDGE) {
+			emp = &pp->em_priv[link->pmp];
+
+			/* EM Transmit bit may be busy during init */
+			for (i = 0; i < EM_MAX_RETRY; i++) {
+				rc = ap->ops->transmit_led_message(ap,
+							       emp->led_state,
+							       4);
+				/*
+				 * If busy, give a breather but do not
+				 * release EH ownership by using msleep()
+				 * instead of ata_msleep(). EM Transmit
+				 * bit is busy for the whole host and
+				 * releasing ownership will cause other
+				 * ports to fail the same way.
+				 */
+				if (rc == -EBUSY)
+					msleep(1);
+				else
+					break;
+			}
+		}
+	}
+
+	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+		ata_for_each_link(link, ap, EDGE)
+			ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+	int rc;
+
+	/* disable DMA */
+	rc = ahci_stop_engine(ap);
+	if (rc) {
+		*emsg = "failed to stop engine";
+		return rc;
+	}
+
+	/* disable FIS reception */
+	rc = ahci_stop_fis_rx(ap);
+	if (rc) {
+		*emsg = "failed to stop FIS RX";
+		return rc;
+	}
+
+	return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 tmp;
+
+	/* we must be in AHCI mode before using anything
+	 * AHCI-specific, such as HOST_RESET.
+ */ + ahci_enable_ahci(mmio); + + /* global controller reset */ + if (!ahci_skip_host_reset) { + tmp = readl(mmio + HOST_CTL); + if ((tmp & HOST_RESET) == 0) { + writel(tmp | HOST_RESET, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + } + + /* + * to perform host reset, OS should set HOST_RESET + * and poll until this bit is read to be "0". + * reset must complete within 1 second, or + * the hardware should be considered fried. + */ + tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET, + HOST_RESET, 10, 1000); + + if (tmp & HOST_RESET) { + dev_err(host->dev, "controller reset failed (0x%x)\n", + tmp); + return -EIO; + } + + /* turn on AHCI mode */ + ahci_enable_ahci(mmio); + + /* Some registers might be cleared on reset. Restore + * initial values. + */ + ahci_restore_initial_config(host); + } else + dev_info(host->dev, "skipping global host reset\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_reset_controller); + +static void ahci_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + if (!(link->flags & ATA_LFLAG_SW_ACTIVITY)) + return; + + emp->activity++; + if (!timer_pending(&emp->timer)) + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); +} + +static void ahci_sw_activity_blink(unsigned long arg) +{ + struct ata_link *link = (struct ata_link *)arg; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + unsigned long led_message = emp->led_state; + u32 activity_led_state; + unsigned long flags; + + led_message &= EM_MSG_LED_VALUE; + led_message |= ap->port_no | (link->pmp << 8); + + /* check to see if we've had activity. If so, + * toggle state of LED and reset timer. If not, + * turn LED to desired idle state. 
+ */ + spin_lock_irqsave(ap->lock, flags); + if (emp->saved_activity != emp->activity) { + emp->saved_activity = emp->activity; + /* get the current LED state */ + activity_led_state = led_message & EM_MSG_LED_VALUE_ON; + + if (activity_led_state) + activity_led_state = 0; + else + activity_led_state = 1; + + /* clear old state */ + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + + /* toggle state */ + led_message |= (activity_led_state << 16); + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); + } else { + /* switch to idle */ + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + if (emp->blink_policy == BLINK_OFF) + led_message |= (1 << 16); + } + spin_unlock_irqrestore(ap->lock, flags); + ap->ops->transmit_led_message(ap, led_message, 4); +} + +static void ahci_init_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* init activity stats, setup timer */ + emp->saved_activity = emp->activity = 0; + setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); + + /* check our blink policy and set flag for link if it's enabled */ + if (emp->blink_policy) + link->flags |= ATA_LFLAG_SW_ACTIVITY; +} + +int ahci_reset_em(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + + em_ctl = readl(mmio + HOST_EM_CTL); + if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST)) + return -EINVAL; + + writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL); + return 0; +} +EXPORT_SYMBOL_GPL(ahci_reset_em); + +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + u32 message[] = {0, 0}; + unsigned long flags; + int pmp; + struct ahci_em_priv *emp; + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp < EM_MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + spin_lock_irqsave(ap->lock, flags); + + /* + * if we are still busy transmitting a previous message, + * do not allow + */ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_TM) { + spin_unlock_irqrestore(ap->lock, flags); + return -EBUSY; + } + + if (hpriv->em_msg_type & EM_MSG_TYPE_LED) { + /* + * create message header - this is all zero except for + * the message size, which is 4 bytes. 
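+		 * For reference, the resulting two-dword message is:
+		 *   message[0] - header, with the message size (4) in
+		 *                bits 15:8
+		 *   message[1] - the caller's LED state, with the HBA
+		 *                port number patched into the low bits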
+ */ + message[0] |= (4 << 8); + + /* ignore 0:4 of byte zero, fill in port info yourself */ + message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); + + /* write message to EM_LOC */ + writel(message[0], mmio + hpriv->em_loc); + writel(message[1], mmio + hpriv->em_loc+4); + + /* + * tell hardware to transmit the message + */ + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); + } + + /* save off new led state for port/slot */ + emp->led_state = state; + + spin_unlock_irqrestore(ap->lock, flags); + return size; +} + +static ssize_t ahci_led_show(struct ata_port *ap, char *buf) +{ + struct ahci_port_priv *pp = ap->private_data; + struct ata_link *link; + struct ahci_em_priv *emp; + int rc = 0; + + ata_for_each_link(link, ap, EDGE) { + emp = &pp->em_priv[link->pmp]; + rc += sprintf(buf, "%lx\n", emp->led_state); + } + return rc; +} + +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size) +{ + unsigned int state; + int pmp; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp; + + if (kstrtouint(buf, 0, &state) < 0) + return -EINVAL; + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp < EM_MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + /* mask off the activity bits if we are in sw_activity + * mode, user should turn off sw_activity before setting + * activity led through em_message + */ + if (emp->blink_policy) + state &= ~EM_MSG_LED_VALUE_ACTIVITY; + + return ap->ops->transmit_led_message(ap, state, size); +} + +static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + u32 port_led_state = emp->led_state; + + /* save the desired Activity LED behavior */ + if (val == OFF) { + /* clear LFLAG */ + link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); + + /* set the LED to OFF */ + port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state |= (ap->port_no | (link->pmp << 8)); + ap->ops->transmit_led_message(ap, port_led_state, 4); + } else { + link->flags |= ATA_LFLAG_SW_ACTIVITY; + if (val == BLINK_OFF) { + /* set LED to ON for idle */ + port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state |= (ap->port_no | (link->pmp << 8)); + port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */ + ap->ops->transmit_led_message(ap, port_led_state, 4); + } + } + emp->blink_policy = val; + return 0; +} + +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* display the saved value of activity behavior for this + * disk. 
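+	 * The value is the raw enum sw_activity setting; assuming the
+	 * enum layout in <linux/libata.h>, 0 = OFF, 1 = BLINK_ON and
+	 * 2 = BLINK_OFF.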
+ */ + return sprintf(buf, "%d\n", emp->blink_policy); +} + +static void ahci_port_init(struct device *dev, struct ata_port *ap, + int port_no, void __iomem *mmio, + void __iomem *port_mmio) +{ + const char *emsg = NULL; + int rc; + u32 tmp; + + /* make sure port is not active */ + rc = ahci_deinit_port(ap, &emsg); + if (rc) + dev_warn(dev, "%s (%d)\n", emsg, rc); + + /* clear SError */ + tmp = readl(port_mmio + PORT_SCR_ERR); + VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); + writel(tmp, port_mmio + PORT_SCR_ERR); + + /* clear port IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + if (tmp) + writel(tmp, port_mmio + PORT_IRQ_STAT); + + writel(1 << port_no, mmio + HOST_IRQ_STAT); +} + +void ahci_init_controller(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + int i; + void __iomem *port_mmio; + u32 tmp; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + port_mmio = ahci_port_base(ap); + if (ata_port_is_dummy(ap)) + continue; + + ahci_port_init(host->dev, ap, i, mmio, port_mmio); + } + + tmp = readl(mmio + HOST_CTL); + VPRINTK("HOST_CTL 0x%x\n", tmp); + writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); + tmp = readl(mmio + HOST_CTL); + VPRINTK("HOST_CTL 0x%x\n", tmp); +} +EXPORT_SYMBOL_GPL(ahci_init_controller); + +static void ahci_dev_config(struct ata_device *dev) +{ + struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; + + if (hpriv->flags & AHCI_HFLAG_SECT255) { + dev->max_sectors = 255; + ata_dev_info(dev, + "SB600 AHCI: limiting to 255 sectors per cmd\n"); + } +} + +unsigned int ahci_dev_classify(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ata_taskfile tf; + u32 tmp; + + tmp = readl(port_mmio + PORT_SIG); + tf.lbah = (tmp >> 24) & 0xff; + tf.lbam = (tmp >> 16) & 0xff; + tf.lbal = (tmp >> 8) & 0xff; + tf.nsect = (tmp) & 0xff; + + return ata_dev_classify(&tf); +} +EXPORT_SYMBOL_GPL(ahci_dev_classify); + +void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts) +{ + dma_addr_t cmd_tbl_dma; + + cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; + + pp->cmd_slot[tag].opts = cpu_to_le32(opts); + pp->cmd_slot[tag].status = 0; + pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); + pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); +} +EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot); + +int ahci_kick_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + u32 tmp; + int busy, rc; + + /* stop engine */ + rc = ahci_stop_engine(ap); + if (rc) + goto out_restart; + + /* need to do CLO? 
+	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
+	 */
+	busy = status & (ATA_BUSY | ATA_DRQ);
+	if (!busy && !sata_pmp_attached(ap)) {
+		rc = 0;
+		goto out_restart;
+	}
+
+	if (!(hpriv->cap & HOST_CAP_CLO)) {
+		rc = -EOPNOTSUPP;
+		goto out_restart;
+	}
+
+	/* perform CLO */
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	rc = 0;
+	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		rc = -EIO;
+
+	/* restart engine */
+ out_restart:
+	hpriv->start_engine(ap);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+				struct ata_taskfile *tf, int is_cmd, u16 flags,
+				unsigned long timeout_msec)
+{
+	const u32 cmd_fis_len = 5; /* five dwords */
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u8 *fis = pp->cmd_tbl;
+	u32 tmp;
+
+	/* prep the command */
+	ata_tf_to_fis(tf, pmp, is_cmd, fis);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+	/* issue & wait */
+	writel(1, port_mmio + PORT_CMD_ISSUE);
+
+	if (timeout_msec) {
+		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
+					0x1, 0x1, 1, timeout_msec);
+		if (tmp & 0x1) {
+			ahci_kick_engine(ap);
+			return -EBUSY;
+		}
+	} else
+		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+
+	return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+		      int pmp, unsigned long deadline,
+		      int (*check_ready)(struct ata_link *link))
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	const char *reason = NULL;
+	unsigned long now, msecs;
+	struct ata_taskfile tf;
+	bool fbs_disabled = false;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* prepare for SRST (AHCI-1.1 10.4.1) */
+	rc = ahci_kick_engine(ap);
+	if (rc && rc != -EOPNOTSUPP)
+		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
+	/*
+	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+	 * clear PxFBS.EN to '0' prior to issuing software reset to a
+	 * device attached to a port multiplier.
+	 */
+	if (!ata_is_host_link(link) && pp->fbs_enabled) {
+		ahci_disable_fbs(ap);
+		fbs_disabled = true;
+	}
+
+	ata_tf_init(link->device, &tf);
+
+	/* issue the first H2D Register FIS */
+	msecs = 0;
+	now = jiffies;
+	if (time_after(deadline, now))
+		msecs = jiffies_to_msecs(deadline - now);
+
+	tf.ctl |= ATA_SRST;
+	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+		rc = -EIO;
+		reason = "1st FIS failed";
+		goto fail;
+	}
+
+	/* spec says at least 5us, but be generous and sleep for 1ms */
+	ata_msleep(ap, 1);
+
+	/* issue the second H2D Register FIS */
+	tf.ctl &= ~ATA_SRST;
+	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+	/* wait for link to become ready */
+	rc = ata_wait_after_reset(link, deadline, check_ready);
+	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+		/*
+		 * Workaround for cases where link online status can't
+		 * be trusted. Treat device readiness timeout as link
+		 * offline.
+ */ + ata_link_info(link, "device not ready, treating as offline\n"); + *class = ATA_DEV_NONE; + } else if (rc) { + /* link occupied, -ENODEV too is an error */ + reason = "device not ready"; + goto fail; + } else + *class = ahci_dev_classify(ap); + + /* re-enable FBS if disabled before */ + if (fbs_disabled) + ahci_enable_fbs(ap); + + DPRINTK("EXIT, class=%u\n", *class); + return 0; + + fail: + ata_link_err(link, "softreset failed (%s)\n", reason); + return rc; +} + +int ahci_check_ready(struct ata_link *link) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + + return ata_check_ready(status); +} +EXPORT_SYMBOL_GPL(ahci_check_ready); + +static int ahci_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int pmp = sata_srst_pmp(link); + + DPRINTK("ENTER\n"); + + return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); +} +EXPORT_SYMBOL_GPL(ahci_do_softreset); + +static int ahci_bad_pmp_check_ready(struct ata_link *link) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + u32 irq_status = readl(port_mmio + PORT_IRQ_STAT); + + /* + * There is no need to check TFDATA if BAD PMP is found due to HW bug, + * which can save timeout delay. + */ + if (irq_status & PORT_IRQ_BAD_PMP) + return -EIO; + + return ata_check_ready(status); +} + +static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + void __iomem *port_mmio = ahci_port_base(ap); + int pmp = sata_srst_pmp(link); + int rc; + u32 irq_sts; + + DPRINTK("ENTER\n"); + + rc = ahci_do_softreset(link, class, pmp, deadline, + ahci_bad_pmp_check_ready); + + /* + * Soft reset fails with IPMS set when PMP is enabled but + * SATA HDD/ODD is connected to SATA port, do soft reset + * again to port 0. 
+ */ + if (rc == -EIO) { + irq_sts = readl(port_mmio + PORT_IRQ_STAT); + if (irq_sts & PORT_IRQ_BAD_PMP) { + ata_link_warn(link, + "applying PMP SRST workaround " + "and retrying\n"); + rc = ahci_do_softreset(link, class, 0, deadline, + ahci_check_ready); + } + } + + return rc; +} + +static int ahci_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + struct ata_taskfile tf; + bool online; + int rc; + + DPRINTK("ENTER\n"); + + ahci_stop_engine(ap); + + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + rc = sata_link_hardreset(link, timing, deadline, &online, + ahci_check_ready); + + hpriv->start_engine(ap); + + if (online) + *class = ahci_dev_classify(ap); + + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); + return rc; +} + +static void ahci_postreset(struct ata_link *link, unsigned int *class) +{ + struct ata_port *ap = link->ap; + void __iomem *port_mmio = ahci_port_base(ap); + u32 new_tmp, tmp; + + ata_std_postreset(link, class); + + /* Make sure port's ATAPI bit is set appropriately */ + new_tmp = tmp = readl(port_mmio + PORT_CMD); + if (*class == ATA_DEV_ATAPI) + new_tmp |= PORT_CMD_ATAPI; + else + new_tmp &= ~PORT_CMD_ATAPI; + if (new_tmp != tmp) { + writel(new_tmp, port_mmio + PORT_CMD); + readl(port_mmio + PORT_CMD); /* flush */ + } +} + +static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) +{ + struct scatterlist *sg; + struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; + unsigned int si; + + VPRINTK("ENTER\n"); + + /* + * Next, the S/G list. + */ + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_addr_t addr = sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); + + ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); + ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); + ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); + } + + return si; +} + +static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; + + if (!sata_pmp_attached(ap) || pp->fbs_enabled) + return ata_std_qc_defer(qc); + else + return sata_pmp_qc_defer_cmd_switch(qc); +} + +static void ahci_qc_prep(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; + int is_atapi = ata_is_atapi(qc->tf.protocol); + void *cmd_tbl; + u32 opts; + const u32 cmd_fis_len = 5; /* five dwords */ + unsigned int n_elem; + + /* + * Fill in command table information. First, the header, + * a SATA Register - Host to Device command FIS. + */ + cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); + if (is_atapi) { + memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); + memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); + } + + n_elem = 0; + if (qc->flags & ATA_QCFLAG_DMAMAP) + n_elem = ahci_fill_sg(qc, cmd_tbl); + + /* + * Fill in command slot information. 
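+	 * The opts word built below is command-header DW0 as laid out
+	 * by the AHCI spec: CFL (FIS length in dwords) in bits 4:0,
+	 * the ATAPI (5), Write (6) and Prefetch (7) flag bits, the PMP
+	 * port in bits 15:12 and PRDTL (S/G entry count) in bits 31:16.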
+ */ + opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); + if (qc->tf.flags & ATA_TFLAG_WRITE) + opts |= AHCI_CMD_WRITE; + if (is_atapi) + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; + + ahci_fill_cmd_slot(pp, qc->tag, opts); +} + +static void ahci_fbs_dec_intr(struct ata_port *ap) +{ + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs = readl(port_mmio + PORT_FBS); + int retries = 3; + + DPRINTK("ENTER\n"); + BUG_ON(!pp->fbs_enabled); + + /* time to wait for DEC is not specified by AHCI spec, + * add a retry loop for safety. + */ + writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + while ((fbs & PORT_FBS_DEC) && retries--) { + udelay(1); + fbs = readl(port_mmio + PORT_FBS); + } + + if (fbs & PORT_FBS_DEC) + dev_err(ap->host->dev, "failed to clear device error\n"); +} + +static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + struct ata_eh_info *host_ehi = &ap->link.eh_info; + struct ata_link *link = NULL; + struct ata_queued_cmd *active_qc; + struct ata_eh_info *active_ehi; + bool fbs_need_dec = false; + u32 serror; + + /* determine active link with error */ + if (pp->fbs_enabled) { + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs = readl(port_mmio + PORT_FBS); + int pmp = fbs >> PORT_FBS_DWE_OFFSET; + + if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) { + link = &ap->pmp_link[pmp]; + fbs_need_dec = true; + } + + } else + ata_for_each_link(link, ap, EDGE) + if (ata_link_active(link)) + break; + + if (!link) + link = &ap->link; + + active_qc = ata_qc_from_tag(ap, link->active_tag); + active_ehi = &link->eh_info; + + /* record irq stat */ + ata_ehi_clear_desc(host_ehi); + ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); + + /* AHCI needs SError cleared; otherwise, it might lock up */ + ahci_scr_read(&ap->link, SCR_ERROR, &serror); + ahci_scr_write(&ap->link, SCR_ERROR, serror); + host_ehi->serror |= serror; + + /* some controllers set IRQ_IF_ERR on device errors, ignore it */ + if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) + irq_stat &= ~PORT_IRQ_IF_ERR; + + if (irq_stat & PORT_IRQ_TF_ERR) { + /* If qc is active, charge it; otherwise, the active + * link. There's no active qc on NCQ errors. It will + * be determined by EH by reading log page 10h. 
+		 */
+		if (active_qc)
+			active_qc->err_mask |= AC_ERR_DEV;
+		else
+			active_ehi->err_mask |= AC_ERR_DEV;
+
+		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+			host_ehi->serror &= ~SERR_INTERNAL;
+	}
+
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = pp->rx_fis + RX_FIS_UNK;
+
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(active_ehi,
+				  "unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
+
+	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(active_ehi, "incorrect PMP");
+	}
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		host_ehi->err_mask |= AC_ERR_HOST_BUS;
+		host_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(host_ehi, "host bus error");
+	}
+
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		if (fbs_need_dec)
+			active_ehi->err_mask |= AC_ERR_DEV;
+		else {
+			host_ehi->err_mask |= AC_ERR_ATA_BUS;
+			host_ehi->action |= ATA_EH_RESET;
+		}
+
+		ata_ehi_push_desc(host_ehi, "interface fatal error");
+	}
+
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		ata_ehi_hotplugged(host_ehi);
+		ata_ehi_push_desc(host_ehi, "%s",
+				  irq_stat & PORT_IRQ_CONNECT ?
+				  "connection status changed" : "PHY RDY changed");
+	}
+
+	/* okay, let's hand over to EH */
+
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else if (fbs_need_dec) {
+		ata_link_abort(link);
+		ahci_fbs_dec_intr(ap);
+	} else
+		ata_port_abort(ap);
+}
+
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+				       void __iomem *port_mmio, u32 status)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+	u32 qc_active = 0;
+	int rc;
+
+	/* ignore BAD_PMP while resetting */
+	if (unlikely(resetting))
+		status &= ~PORT_IRQ_BAD_PMP;
+
+	/* if LPM is enabled, PHYRDY doesn't mean anything */
+	if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
+		status &= ~PORT_IRQ_PHYRDY;
+		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
+	}
+
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
+	}
+
+	if (status & PORT_IRQ_SDB_FIS) {
+		/* If SNotification is available, leave notification
+		 * handling to sata_async_notification(). If not,
+		 * emulate it by snooping SDB FIS RX area.
+		 *
+		 * Snooping FIS RX area is probably cheaper than
+		 * poking SNotification, but some controllers which
+		 * implement SNotification, ICH9 for example, don't
+		 * store the AN SDB FIS in the receive area.
+		 */
+		if (hpriv->cap & HOST_CAP_SNTF)
+			sata_async_notification(ap);
+		else {
+			/* If the 'N' bit in word 0 of the FIS is set,
+			 * we just received asynchronous notification.
+			 * Tell libata about it.
+			 *
+			 * Lack of SNotification should not appear in
+			 * ahci 1.2, so the workaround is unnecessary
+			 * when FBS is enabled.
+			 */
+			if (pp->fbs_enabled)
+				WARN_ON_ONCE(1);
+			else {
+				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+				u32 f0 = le32_to_cpu(f[0]);
+				if (f0 & (1 << 15))
+					sata_async_notification(ap);
+			}
+		}
+	}
+
+	/* pp->active_link is not reliable once FBS is enabled; both
+	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+	 * NCQ and non-NCQ commands may be in flight at the same time.
+ */ + if (pp->fbs_enabled) { + if (ap->qc_active) { + qc_active = readl(port_mmio + PORT_SCR_ACT); + qc_active |= readl(port_mmio + PORT_CMD_ISSUE); + } + } else { + /* pp->active_link is valid iff any command is in flight */ + if (ap->qc_active && pp->active_link->sactive) + qc_active = readl(port_mmio + PORT_SCR_ACT); + else + qc_active = readl(port_mmio + PORT_CMD_ISSUE); + } + + + rc = ata_qc_complete_multiple(ap, qc_active); + + /* while resetting, invalid completions are expected */ + if (unlikely(rc < 0 && !resetting)) { + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + ata_port_freeze(ap); + } +} + +static void ahci_port_intr(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 status; + + status = readl(port_mmio + PORT_IRQ_STAT); + writel(status, port_mmio + PORT_IRQ_STAT); + + ahci_handle_port_interrupt(ap, port_mmio, status); +} + +irqreturn_t ahci_thread_fn(int irq, void *dev_instance) +{ + struct ata_port *ap = dev_instance; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + unsigned long flags; + u32 status; + + spin_lock_irqsave(&ap->host->lock, flags); + status = pp->intr_status; + if (status) + pp->intr_status = 0; + spin_unlock_irqrestore(&ap->host->lock, flags); + + spin_lock_bh(ap->lock); + ahci_handle_port_interrupt(ap, port_mmio, status); + spin_unlock_bh(ap->lock); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(ahci_thread_fn); + +static void ahci_hw_port_interrupt(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + u32 status; + + status = readl(port_mmio + PORT_IRQ_STAT); + writel(status, port_mmio + PORT_IRQ_STAT); + + pp->intr_status |= status; +} + +irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance) +{ + struct ata_port *ap_this = dev_instance; + struct ahci_port_priv *pp = ap_this->private_data; + struct ata_host *host = ap_this->host; + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + unsigned int i; + u32 irq_stat, irq_masked; + + VPRINTK("ENTER\n"); + + spin_lock(&host->lock); + + irq_stat = readl(mmio + HOST_IRQ_STAT); + + if (!irq_stat) { + u32 status = pp->intr_status; + + spin_unlock(&host->lock); + + VPRINTK("EXIT\n"); + + return status ? IRQ_WAKE_THREAD : IRQ_NONE; + } + + irq_masked = irq_stat & hpriv->port_map; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap; + + if (!(irq_masked & (1 << i))) + continue; + + ap = host->ports[i]; + if (ap) { + ahci_hw_port_interrupt(ap); + VPRINTK("port %u\n", i); + } else { + VPRINTK("port %u (no irq)\n", i); + if (ata_ratelimit()) + dev_warn(host->dev, + "interrupt on disabled port %u\n", i); + } + } + + writel(irq_stat, mmio + HOST_IRQ_STAT); + + spin_unlock(&host->lock); + + VPRINTK("EXIT\n"); + + return IRQ_WAKE_THREAD; +} +EXPORT_SYMBOL_GPL(ahci_hw_interrupt); + +irqreturn_t ahci_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct ahci_host_priv *hpriv; + unsigned int i, handled = 0; + void __iomem *mmio; + u32 irq_stat, irq_masked; + + VPRINTK("ENTER\n"); + + hpriv = host->private_data; + mmio = hpriv->mmio; + + /* sigh. 
0xffffffff is a valid return from h/w */ + irq_stat = readl(mmio + HOST_IRQ_STAT); + if (!irq_stat) + return IRQ_NONE; + + irq_masked = irq_stat & hpriv->port_map; + + spin_lock(&host->lock); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap; + + if (!(irq_masked & (1 << i))) + continue; + + ap = host->ports[i]; + if (ap) { + ahci_port_intr(ap); + VPRINTK("port %u\n", i); + } else { + VPRINTK("port %u (no irq)\n", i); + if (ata_ratelimit()) + dev_warn(host->dev, + "interrupt on disabled port %u\n", i); + } + + handled = 1; + } + + /* HOST_IRQ_STAT behaves as level triggered latch meaning that + * it should be cleared after all the port events are cleared; + * otherwise, it will raise a spurious interrupt after each + * valid one. Please read section 10.6.2 of ahci 1.1 for more + * information. + * + * Also, use the unmasked value to clear interrupt as spurious + * pending event on a dummy port might cause screaming IRQ. + */ + writel(irq_stat, mmio + HOST_IRQ_STAT); + + spin_unlock(&host->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} +EXPORT_SYMBOL_GPL(ahci_interrupt); + +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + + /* Keep track of the currently active link. It will be used + * in completion path to determine whether NCQ phase is in + * progress. + */ + pp->active_link = qc->dev->link; + + if (qc->tf.protocol == ATA_PROT_NCQ) + writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); + + if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { + u32 fbs = readl(port_mmio + PORT_FBS); + fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); + fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; + writel(fbs, port_mmio + PORT_FBS); + pp->fbs_last_dev = qc->dev->link->pmp; + } + + writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); + + ahci_sw_activity(qc->dev->link); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_qc_issue); + +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct ahci_port_priv *pp = qc->ap->private_data; + u8 *rx_fis = pp->rx_fis; + + if (pp->fbs_enabled) + rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + + /* + * After a successful execution of an ATA PIO data-in command, + * the device doesn't send D2H Reg FIS to update the TF and + * the host should take TF and E_Status from the preceding PIO + * Setup FIS. 
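+	 * E_Status lives in byte 15 of the PIO Setup FIS, which is why
+	 * the status byte of the result taskfile is patched up from
+	 * that offset below.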
+ */ + if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && + !(qc->flags & ATA_QCFLAG_FAILED)) { + ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); + qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + } else + ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); + + return true; +} + +static void ahci_freeze(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + + /* turn IRQ off */ + writel(0, port_mmio + PORT_IRQ_MASK); +} + +static void ahci_thaw(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + struct ahci_port_priv *pp = ap->private_data; + + /* clear IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + writel(tmp, port_mmio + PORT_IRQ_STAT); + writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); + + /* turn IRQ back on */ + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +void ahci_error_handler(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { + /* restart engine */ + ahci_stop_engine(ap); + hpriv->start_engine(ap); + } + + sata_pmp_error_handler(ap); + + if (!ata_dev_enabled(ap->link.device)) + ahci_stop_engine(ap); +} +EXPORT_SYMBOL_GPL(ahci_error_handler); + +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + ahci_kick_engine(ap); +} + +static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + struct ata_device *dev = ap->link.device; + u32 devslp, dm, dito, mdat, deto; + int rc; + unsigned int err_mask; + + devslp = readl(port_mmio + PORT_DEVSLP); + if (!(devslp & PORT_DEVSLP_DSP)) { + dev_err(ap->host->dev, "port does not support device sleep\n"); + return; + } + + /* disable device sleep */ + if (!sleep) { + if (devslp & PORT_DEVSLP_ADSE) { + writel(devslp & ~PORT_DEVSLP_ADSE, + port_mmio + PORT_DEVSLP); + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_DISABLE, + SATA_DEVSLP); + if (err_mask && err_mask != AC_ERR_DEV) + ata_dev_warn(dev, "failed to disable DEVSLP\n"); + } + return; + } + + /* device sleep was already enabled */ + if (devslp & PORT_DEVSLP_ADSE) + return; + + /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ + rc = ahci_stop_engine(ap); + if (rc) + return; + + dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET; + dito = devslp_idle_timeout / (dm + 1); + if (dito > 0x3ff) + dito = 0x3ff; + + /* Use the nominal value 10 ms if the read MDAT is zero, + * the nominal value of DETO is 20 ms. 
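+	 * MDAT and DETO are taken from the DevSlp timing log read at
+	 * device init when its valid bit is set; DITO was derived
+	 * above from the devslp_idle_timeout module parameter, scaled
+	 * by the DM multiplier and clamped to the 10-bit field maximum.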
+ */ + if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] & + ATA_LOG_DEVSLP_VALID_MASK) { + mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] & + ATA_LOG_DEVSLP_MDAT_MASK; + if (!mdat) + mdat = 10; + deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO]; + if (!deto) + deto = 20; + } else { + mdat = 10; + deto = 20; + } + + devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) | + (mdat << PORT_DEVSLP_MDAT_OFFSET) | + (deto << PORT_DEVSLP_DETO_OFFSET) | + PORT_DEVSLP_ADSE); + writel(devslp, port_mmio + PORT_DEVSLP); + + hpriv->start_engine(ap); + + /* enable device sleep feature for the drive */ + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, + SATA_DEVSLP); + if (err_mask && err_mask != AC_ERR_DEV) + ata_dev_warn(dev, "failed to enable DEVSLP\n"); +} + +static void ahci_enable_fbs(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs; + int rc; + + if (!pp->fbs_supported) + return; + + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) { + pp->fbs_enabled = true; + pp->fbs_last_dev = -1; /* initialization */ + return; + } + + rc = ahci_stop_engine(ap); + if (rc) + return; + + writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) { + dev_info(ap->host->dev, "FBS is enabled\n"); + pp->fbs_enabled = true; + pp->fbs_last_dev = -1; /* initialization */ + } else + dev_err(ap->host->dev, "Failed to enable FBS\n"); + + hpriv->start_engine(ap); +} + +static void ahci_disable_fbs(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs; + int rc; + + if (!pp->fbs_supported) + return; + + fbs = readl(port_mmio + PORT_FBS); + if ((fbs & PORT_FBS_EN) == 0) { + pp->fbs_enabled = false; + return; + } + + rc = ahci_stop_engine(ap); + if (rc) + return; + + writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) + dev_err(ap->host->dev, "Failed to disable FBS\n"); + else { + dev_info(ap->host->dev, "FBS is disabled\n"); + pp->fbs_enabled = false; + } + + hpriv->start_engine(ap); +} + +static void ahci_pmp_attach(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + u32 cmd; + + cmd = readl(port_mmio + PORT_CMD); + cmd |= PORT_CMD_PMP; + writel(cmd, port_mmio + PORT_CMD); + + ahci_enable_fbs(ap); + + pp->intr_mask |= PORT_IRQ_BAD_PMP; + + /* + * We must not change the port interrupt mask register if the + * port is marked frozen, the value in pp->intr_mask will be + * restored later when the port is thawed. + * + * Note that during initialization, the port is marked as + * frozen since the irq handler is not yet registered. 
+ */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +static void ahci_pmp_detach(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + u32 cmd; + + ahci_disable_fbs(ap); + + cmd = readl(port_mmio + PORT_CMD); + cmd &= ~PORT_CMD_PMP; + writel(cmd, port_mmio + PORT_CMD); + + pp->intr_mask &= ~PORT_IRQ_BAD_PMP; + + /* see comment above in ahci_pmp_attach() */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +int ahci_port_resume(struct ata_port *ap) +{ + ahci_power_up(ap); + ahci_start_port(ap); + + if (sata_pmp_attached(ap)) + ahci_pmp_attach(ap); + else + ahci_pmp_detach(ap); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_port_resume); + +#ifdef CONFIG_PM +static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) +{ + const char *emsg = NULL; + int rc; + + rc = ahci_deinit_port(ap, &emsg); + if (rc == 0) + ahci_power_down(ap); + else { + ata_port_err(ap, "%s (%d)\n", emsg, rc); + ata_port_freeze(ap); + } + + return rc; +} +#endif + +static int ahci_port_start(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct device *dev = ap->host->dev; + struct ahci_port_priv *pp; + void *mem; + dma_addr_t mem_dma; + size_t dma_sz, rx_fis_sz; + + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + if (ap->host->n_ports > 1) { + pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL); + if (!pp->irq_desc) { + devm_kfree(dev, pp); + return -ENOMEM; + } + snprintf(pp->irq_desc, 8, + "%s%d", dev_driver_string(dev), ap->port_no); + } + + /* check FBS capability */ + if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd = readl(port_mmio + PORT_CMD); + if (cmd & PORT_CMD_FBSCP) + pp->fbs_supported = true; + else if (hpriv->flags & AHCI_HFLAG_YES_FBS) { + dev_info(dev, "port %d can do FBS, forcing FBSCP\n", + ap->port_no); + pp->fbs_supported = true; + } else + dev_warn(dev, "port %d is not capable of FBS\n", + ap->port_no); + } + + if (pp->fbs_supported) { + dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; + rx_fis_sz = AHCI_RX_FIS_SZ * 16; + } else { + dma_sz = AHCI_PORT_PRIV_DMA_SZ; + rx_fis_sz = AHCI_RX_FIS_SZ; + } + + mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); + if (!mem) + return -ENOMEM; + memset(mem, 0, dma_sz); + + /* + * First item in chunk of DMA memory: 32-slot command table, + * 32 bytes each in size + */ + pp->cmd_slot = mem; + pp->cmd_slot_dma = mem_dma; + + mem += AHCI_CMD_SLOT_SZ; + mem_dma += AHCI_CMD_SLOT_SZ; + + /* + * Second item: Received-FIS area + */ + pp->rx_fis = mem; + pp->rx_fis_dma = mem_dma; + + mem += rx_fis_sz; + mem_dma += rx_fis_sz; + + /* + * Third item: data area for storing a single command + * and its scatter-gather table + */ + pp->cmd_tbl = mem; + pp->cmd_tbl_dma = mem_dma; + + /* + * Save off initial list of interrupts to be enabled. + * This could be changed later + */ + pp->intr_mask = DEF_PORT_IRQ; + + /* + * Switch to per-port locking in case each port has its own MSI vector. 
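+	 * With AHCI_HFLAG_MULTI_MSI each port has a dedicated interrupt
+	 * vector, so handlers for different ports never contend and a
+	 * private per-port lock is cheaper than the shared host lock;
+	 * ap->lock is repointed at it below.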
+ */ + if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) { + spin_lock_init(&pp->lock); + ap->lock = &pp->lock; + } + + ap->private_data = pp; + + /* engage engines, captain */ + return ahci_port_resume(ap); +} + +static void ahci_port_stop(struct ata_port *ap) +{ + const char *emsg = NULL; + int rc; + + /* de-initialize port */ + rc = ahci_deinit_port(ap, &emsg); + if (rc) + ata_port_warn(ap, "%s (%d)\n", emsg, rc); +} + +void ahci_print_info(struct ata_host *host, const char *scc_s) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 vers, cap, cap2, impl, speed; + const char *speed_s; + + vers = readl(mmio + HOST_VERSION); + cap = hpriv->cap; + cap2 = hpriv->cap2; + impl = hpriv->port_map; + + speed = (cap >> 20) & 0xf; + if (speed == 1) + speed_s = "1.5"; + else if (speed == 2) + speed_s = "3"; + else if (speed == 3) + speed_s = "6"; + else + speed_s = "?"; + + dev_info(host->dev, + "AHCI %02x%02x.%02x%02x " + "%u slots %u ports %s Gbps 0x%x impl %s mode\n" + , + + (vers >> 24) & 0xff, + (vers >> 16) & 0xff, + (vers >> 8) & 0xff, + vers & 0xff, + + ((cap >> 8) & 0x1f) + 1, + (cap & 0x1f) + 1, + speed_s, + impl, + scc_s); + + dev_info(host->dev, + "flags: " + "%s%s%s%s%s%s%s" + "%s%s%s%s%s%s%s" + "%s%s%s%s%s%s%s" + "%s%s\n" + , + + cap & HOST_CAP_64 ? "64bit " : "", + cap & HOST_CAP_NCQ ? "ncq " : "", + cap & HOST_CAP_SNTF ? "sntf " : "", + cap & HOST_CAP_MPS ? "ilck " : "", + cap & HOST_CAP_SSS ? "stag " : "", + cap & HOST_CAP_ALPM ? "pm " : "", + cap & HOST_CAP_LED ? "led " : "", + cap & HOST_CAP_CLO ? "clo " : "", + cap & HOST_CAP_ONLY ? "only " : "", + cap & HOST_CAP_PMP ? "pmp " : "", + cap & HOST_CAP_FBS ? "fbs " : "", + cap & HOST_CAP_PIO_MULTI ? "pio " : "", + cap & HOST_CAP_SSC ? "slum " : "", + cap & HOST_CAP_PART ? "part " : "", + cap & HOST_CAP_CCC ? "ccc " : "", + cap & HOST_CAP_EMS ? "ems " : "", + cap & HOST_CAP_SXS ? "sxs " : "", + cap2 & HOST_CAP2_DESO ? "deso " : "", + cap2 & HOST_CAP2_SADM ? "sadm " : "", + cap2 & HOST_CAP2_SDS ? "sds " : "", + cap2 & HOST_CAP2_APST ? "apst " : "", + cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "", + cap2 & HOST_CAP2_BOH ? "boh " : "" + ); +} +EXPORT_SYMBOL_GPL(ahci_print_info); + +void ahci_set_em_messages(struct ahci_host_priv *hpriv, + struct ata_port_info *pi) +{ + u8 messages; + void __iomem *mmio = hpriv->mmio; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + + if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS)) + return; + + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + + if (messages) { + /* store em_loc */ + hpriv->em_loc = ((em_loc >> 16) * 4); + hpriv->em_buf_sz = ((em_loc & 0xff) * 4); + hpriv->em_msg_type = messages; + pi->flags |= ATA_FLAG_EM; + if (!(em_ctl & EM_CTL_ALHD)) + pi->flags |= ATA_FLAG_SW_ACTIVITY; + } +} +EXPORT_SYMBOL_GPL(ahci_set_em_messages); + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Common AHCI SATA low-level routines"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c new file mode 100644 index 00000000000..b0077589f06 --- /dev/null +++ b/drivers/ata/libahci_platform.c @@ -0,0 +1,549 @@ +/* + * AHCI SATA platform library + * + * Copyright 2004-2005 Red Hat, Inc. + * Jeff Garzik <jgarzik@pobox.com> + * Copyright 2010 MontaVista Software, LLC. 
+ * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + */ + +#include <linux/clk.h> +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/ahci_platform.h> +#include <linux/phy/phy.h> +#include <linux/pm_runtime.h> +#include "ahci.h" + +static void ahci_host_stop(struct ata_host *host); + +struct ata_port_operations ahci_platform_ops = { + .inherits = &ahci_ops, + .host_stop = ahci_host_stop, +}; +EXPORT_SYMBOL_GPL(ahci_platform_ops); + +static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT("ahci_platform"), +}; + +/** + * ahci_platform_enable_clks - Enable platform clocks + * @hpriv: host private area to store config values + * + * This function enables all the clks found in hpriv->clks, starting at + * index 0. If any clk fails to enable it disables all the clks already + * enabled in reverse order, and then returns an error. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_enable_clks(struct ahci_host_priv *hpriv) +{ + int c, rc; + + for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) { + rc = clk_prepare_enable(hpriv->clks[c]); + if (rc) + goto disable_unprepare_clk; + } + return 0; + +disable_unprepare_clk: + while (--c >= 0) + clk_disable_unprepare(hpriv->clks[c]); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_enable_clks); + +/** + * ahci_platform_disable_clks - Disable platform clocks + * @hpriv: host private area to store config values + * + * This function disables all the clks found in hpriv->clks, in reverse + * order of ahci_platform_enable_clks (starting at the end of the array). + */ +void ahci_platform_disable_clks(struct ahci_host_priv *hpriv) +{ + int c; + + for (c = AHCI_MAX_CLKS - 1; c >= 0; c--) + if (hpriv->clks[c]) + clk_disable_unprepare(hpriv->clks[c]); +} +EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); + +/** + * ahci_platform_enable_resources - Enable platform resources + * @hpriv: host private area to store config values + * + * This function enables all ahci_platform managed resources in the + * following order: + * 1) Regulator + * 2) Clocks (through ahci_platform_enable_clks) + * 3) Phy + * + * If resource enabling fails at any point the previous enabled resources + * are disabled in reverse order. 
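+ *
+ * A platform driver's probe routine is expected to call this roughly
+ * as follows (a minimal sketch; "my_port_info" is an illustrative
+ * name and error unwinding is abbreviated):
+ *
+ *	hpriv = ahci_platform_get_resources(pdev);
+ *	if (IS_ERR(hpriv))
+ *		return PTR_ERR(hpriv);
+ *	rc = ahci_platform_enable_resources(hpriv);
+ *	if (rc)
+ *		return rc;
+ *	rc = ahci_platform_init_host(pdev, hpriv, &my_port_info, 0, 0, 0);
+ *	if (rc)
+ *		ahci_platform_disable_resources(hpriv);
+ *	return rc;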
+ * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) +{ + int rc; + + if (hpriv->target_pwr) { + rc = regulator_enable(hpriv->target_pwr); + if (rc) + return rc; + } + + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_regulator; + + if (hpriv->phy) { + rc = phy_init(hpriv->phy); + if (rc) + goto disable_clks; + + rc = phy_power_on(hpriv->phy); + if (rc) { + phy_exit(hpriv->phy); + goto disable_clks; + } + } + + return 0; + +disable_clks: + ahci_platform_disable_clks(hpriv); + +disable_regulator: + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); + +/** + * ahci_platform_disable_resources - Disable platform resources + * @hpriv: host private area to store config values + * + * This function disables all ahci_platform managed resources in the + * following order: + * 1) Phy + * 2) Clocks (through ahci_platform_disable_clks) + * 3) Regulator + */ +void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) +{ + if (hpriv->phy) { + phy_power_off(hpriv->phy); + phy_exit(hpriv->phy); + } + + ahci_platform_disable_clks(hpriv); + + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); +} +EXPORT_SYMBOL_GPL(ahci_platform_disable_resources); + +static void ahci_platform_put_resources(struct device *dev, void *res) +{ + struct ahci_host_priv *hpriv = res; + int c; + + if (hpriv->got_runtime_pm) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } + + for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) + clk_put(hpriv->clks[c]); +} + +/** + * ahci_platform_get_resources - Get platform resources + * @pdev: platform device to get resources for + * + * This function allocates an ahci_host_priv struct, and gets the following + * resources, storing a reference to them inside the returned struct: + * + * 1) mmio registers (IORESOURCE_MEM 0, mandatory) + * 2) regulator for controlling the targets power (optional) + * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, + * or for non devicetree enabled platforms a single clock + * 4) phy (optional) + * + * RETURNS: + * The allocated ahci_host_priv on success, otherwise an ERR_PTR value + */ +struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct clk *clk; + int i, rc = -ENOMEM; + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + + hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv), + GFP_KERNEL); + if (!hpriv) + goto err_out; + + devres_add(dev, hpriv); + + hpriv->mmio = devm_ioremap_resource(dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (IS_ERR(hpriv->mmio)) { + dev_err(dev, "no mmio space\n"); + rc = PTR_ERR(hpriv->mmio); + goto err_out; + } + + hpriv->target_pwr = devm_regulator_get_optional(dev, "target"); + if (IS_ERR(hpriv->target_pwr)) { + rc = PTR_ERR(hpriv->target_pwr); + if (rc == -EPROBE_DEFER) + goto err_out; + hpriv->target_pwr = NULL; + } + + for (i = 0; i < AHCI_MAX_CLKS; i++) { + /* + * For now we must use clk_get(dev, NULL) for the first clock, + * because some platforms (da850, spear13xx) are not yet + * converted to use devicetree for clocks. For new platforms + * this is equivalent to of_clk_get(dev->of_node, 0). 
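+ *
+ * Every clock past index 0 must therefore be described in the
+ * devicetree; the loop stops at the first index with no clock and
+ * keeps everything acquired before it (only -EPROBE_DEFER aborts
+ * the whole probe).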
+ */ + if (i == 0) + clk = clk_get(dev, NULL); + else + clk = of_clk_get(dev->of_node, i); + + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_out; + break; + } + hpriv->clks[i] = clk; + } + + hpriv->phy = devm_phy_get(dev, "sata-phy"); + if (IS_ERR(hpriv->phy)) { + rc = PTR_ERR(hpriv->phy); + switch (rc) { + case -ENOSYS: + /* No PHY support. Check if PHY is required. */ + if (of_find_property(dev->of_node, "phys", NULL)) { + dev_err(dev, "couldn't get sata-phy: ENOSYS\n"); + goto err_out; + } + case -ENODEV: + /* continue normally */ + hpriv->phy = NULL; + break; + + case -EPROBE_DEFER: + goto err_out; + + default: + dev_err(dev, "couldn't get sata-phy\n"); + goto err_out; + } + } + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + hpriv->got_runtime_pm = true; + + devres_remove_group(dev, NULL); + return hpriv; + +err_out: + devres_release_group(dev, NULL); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(ahci_platform_get_resources); + +/** + * ahci_platform_init_host - Bring up an ahci-platform host + * @pdev: platform device pointer for the host + * @hpriv: ahci-host private data for the host + * @pi_template: template for the ata_port_info to use + * @host_flags: ahci host flags used in ahci_host_priv + * @force_port_map: param passed to ahci_save_initial_config + * @mask_port_map: param passed to ahci_save_initial_config + * + * This function does all the usual steps needed to bring up an + * ahci-platform host, note any necessary resources (ie clks, phy, etc.) + * must be initialized / enabled before calling this. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_init_host(struct platform_device *pdev, + struct ahci_host_priv *hpriv, + const struct ata_port_info *pi_template, + unsigned long host_flags, + unsigned int force_port_map, + unsigned int mask_port_map) +{ + struct device *dev = &pdev->dev; + struct ata_port_info pi = *pi_template; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct ata_host *host; + int i, irq, n_ports, rc; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "no irq\n"); + return -EINVAL; + } + + /* prepare host */ + pi.private_data = (void *)host_flags; + hpriv->flags |= host_flags; + + ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map); + + if (hpriv->cap & HOST_CAP_NCQ) + pi.flags |= ATA_FLAG_NCQ; + + if (hpriv->cap & HOST_CAP_PMP) + pi.flags |= ATA_FLAG_PMP; + + ahci_set_em_messages(hpriv, &pi); + + /* CAP.NP sometimes indicate the index of the last enabled + * port, at other times, that of the last possible port, so + * determining the maximum port number requires looking at + * both CAP.NP and port_map. 
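+ * For example (illustrative values): CAP.NP = 3 would by itself
+ * mean 4 ports, but with port_map = 0x13 (ports 0, 1 and 4
+ * implemented) fls() yields 5, so five port slots are allocated.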
+ */ + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); + + host = ata_host_alloc_pinfo(dev, ppi, n_ports); + if (!host) + return -ENOMEM; + + host->private_data = hpriv; + + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) + host->flags |= ATA_HOST_PARALLEL_SCAN; + else + dev_info(dev, "SSS flag set, parallel bus scan disabled\n"); + + if (pi.flags & ATA_FLAG_EM) + ahci_reset_em(host); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ata_port_desc(ap, "mmio %pR", + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); + + /* set enclosure management message type */ + if (ap->flags & ATA_FLAG_EM) + ap->em_message_type = hpriv->em_msg_type; + + /* disabled/not-implemented port */ + if (!(hpriv->port_map & (1 << i))) + ap->ops = &ata_dummy_port_ops; + } + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + ahci_print_info(host, "platform"); + + return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, + &ahci_platform_sht); +} +EXPORT_SYMBOL_GPL(ahci_platform_init_host); + +static void ahci_host_stop(struct ata_host *host) +{ + struct device *dev = host->dev; + struct ahci_platform_data *pdata = dev_get_platdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + + if (pdata && pdata->exit) + pdata->exit(dev); + + ahci_platform_disable_resources(hpriv); +} + +#ifdef CONFIG_PM_SLEEP +/** + * ahci_platform_suspend_host - Suspend an ahci-platform host + * @dev: device pointer for the host + * + * This function does all the usual steps needed to suspend an + * ahci-platform host, note any necessary resources (ie clks, phy, etc.) + * must be disabled after calling this. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_suspend_host(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 ctl; + + if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { + dev_err(dev, "firmware update required for suspend/resume\n"); + return -EIO; + } + + /* + * AHCI spec rev1.1 section 8.3.3: + * Software must disable interrupts prior to requesting a + * transition of the HBA to D3 state. + */ + ctl = readl(mmio + HOST_CTL); + ctl &= ~HOST_IRQ_EN; + writel(ctl, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + + return ata_host_suspend(host, PMSG_SUSPEND); +} +EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); + +/** + * ahci_platform_resume_host - Resume an ahci-platform host + * @dev: device pointer for the host + * + * This function does all the usual steps needed to resume an ahci-platform + * host, note any necessary resources (ie clks, phy, etc.) must be + * initialized / enabled before calling this. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_resume_host(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + int rc; + + if (dev->power.power_state.event == PM_EVENT_SUSPEND) { + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + } + + ata_host_resume(host); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_platform_resume_host); + +/** + * ahci_platform_suspend - Suspend an ahci-platform device + * @dev: the platform device to suspend + * + * This function suspends the host associated with the device, followed by + * disabling all the resources of the device. 
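+ * In other words the caller mirrors resume: suspend the host first,
+ * then turn off clocks, phys and regulators; ahci_platform_suspend()
+ * below pairs the two steps exactly this way.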
+ * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_suspend(struct device *dev) +{ + struct ahci_platform_data *pdata = dev_get_platdata(dev); + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_platform_suspend_host(dev); + if (rc) + return rc; + + if (pdata && pdata->suspend) { + rc = pdata->suspend(dev); + if (rc) + goto resume_host; + } + + ahci_platform_disable_resources(hpriv); + + return 0; + +resume_host: + ahci_platform_resume_host(dev); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_suspend); + +/** + * ahci_platform_resume - Resume an ahci-platform device + * @dev: the platform device to resume + * + * This function enables all the resources of the device followed by + * resuming the host associated with the device. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_resume(struct device *dev) +{ + struct ahci_platform_data *pdata = dev_get_platdata(dev); + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + if (pdata && pdata->resume) { + rc = pdata->resume(dev); + if (rc) + goto disable_resources; + } + + rc = ahci_platform_resume_host(dev); + if (rc) + goto disable_resources; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_resume); +#endif + +MODULE_DESCRIPTION("AHCI SATA platform library"); +MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c new file mode 100644 index 00000000000..97a14fe47de --- /dev/null +++ b/drivers/ata/libata-acpi.c @@ -0,0 +1,1054 @@ +/* + * libata-acpi.c + * Provides ACPI support for PATA/SATA. + * + * Copyright (C) 2006 Intel Corp. + * Copyright (C) 2006 Randy Dunlap + */ + +#include <linux/module.h> +#include <linux/ata.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/libata.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <scsi/scsi_device.h> +#include "libata.h" + +unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT; +module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644); +MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)"); + +#define NO_PORT_MULT 0xffff +#define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) + +#define REGS_PER_GTF 7 +struct ata_acpi_gtf { + u8 tf[REGS_PER_GTF]; /* regs. 
0x1f1 - 0x1f7 */
+} __packed;
+
+static void ata_acpi_clear_gtf(struct ata_device *dev)
+{
+ kfree(dev->gtf_cache);
+ dev->gtf_cache = NULL;
+}
+
+struct ata_acpi_hotplug_context {
+ struct acpi_hotplug_context hp;
+ union {
+ struct ata_port *ap;
+ struct ata_device *dev;
+ } data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
+/**
+ * ata_dev_acpi_handle - provide the acpi_handle for an ata_device
+ * @dev: the acpi_handle returned will correspond to this device
+ *
+ * Returns the acpi_handle for the ACPI namespace object corresponding to
+ * the ata_device passed into the function, or NULL if no such object exists
+ * or ACPI is disabled for this device due to consecutive errors.
+ */
+acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
+{
+ return dev->flags & ATA_DFLAG_ACPI_DISABLED ?
+ NULL : ACPI_HANDLE(&dev->tdev);
+}
+
+/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
+static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
+{
+ if (dev)
+ dev->flags |= ATA_DFLAG_DETACH;
+ else {
+ struct ata_link *tlink;
+ struct ata_device *tdev;
+
+ ata_for_each_link(tlink, ap, EDGE)
+ ata_for_each_dev(tdev, tlink, ALL)
+ tdev->flags |= ATA_DFLAG_DETACH;
+ }
+
+ ata_port_schedule_eh(ap);
+}
+
+/**
+ * ata_acpi_handle_hotplug - ACPI event handler backend
+ * @ap: ATA port the ACPI event occurred on
+ * @dev: ATA device the ACPI event occurred on (can be NULL)
+ * @event: ACPI event which occurred
+ *
+ * All ACPI bay / device related events end up in this function. If
+ * the event is port-wide @dev is NULL. If the event is specific to a
+ * device, @dev points to it.
+ *
+ * Hotplug (as opposed to unplug) notification is always handled as
+ * port-wide while unplug only kills the target device on device-wide
+ * event.
+ *
+ * LOCKING:
+ * ACPI notify handler context. May sleep.
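+ *
+ * For example, an eject request from an ACPI-driven bay arrives as
+ * ACPI_NOTIFY_EJECT_REQUEST: the affected device(s) are flagged for
+ * detach and this function waits for EH to finish before returning
+ * to the dock driver, while BUS_CHECK / DEVICE_CHECK insertion
+ * events simply mark the port hotplugged and freeze it for reprobing.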
+ */ +static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, + u32 event) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + int wait = 0; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + /* + * When dock driver calls into the routine, it will always use + * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and + * ACPI_NOTIFY_EJECT_REQUEST for remove + */ + switch (event) { + case ACPI_NOTIFY_BUS_CHECK: + case ACPI_NOTIFY_DEVICE_CHECK: + ata_ehi_push_desc(ehi, "ACPI event"); + + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); + break; + case ACPI_NOTIFY_EJECT_REQUEST: + ata_ehi_push_desc(ehi, "ACPI event"); + + ata_acpi_detach_device(ap, dev); + wait = 1; + break; + } + + spin_unlock_irqrestore(ap->lock, flags); + + if (wait) + ata_port_wait_eh(ap); +} + +static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event) +{ + struct ata_device *dev = ata_hotplug_data(adev->hp).dev; + ata_acpi_handle_hotplug(dev->link->ap, dev, event); + return 0; +} + +static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event) +{ + ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event); + return 0; +} + +static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev, + u32 event) +{ + struct kobject *kobj = NULL; + char event_string[20]; + char *envp[] = { event_string, NULL }; + + if (dev) { + if (dev->sdev) + kobj = &dev->sdev->sdev_gendev.kobj; + } else + kobj = &ap->dev->kobj; + + if (kobj) { + snprintf(event_string, 20, "BAY_EVENT=%d", event); + kobject_uevent_env(kobj, KOBJ_CHANGE, envp); + } +} + +static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event) +{ + ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event); +} + +static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event) +{ + struct ata_device *dev = ata_hotplug_data(adev->hp).dev; + ata_acpi_uevent(dev->link->ap, dev, event); +} + +/* bind acpi handle to pata port */ +void ata_acpi_bind_port(struct ata_port *ap) +{ + struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev); + struct acpi_device *adev; + struct ata_acpi_hotplug_context *context; + + if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion) + return; + + acpi_preset_companion(&ap->tdev, host_companion, ap->port_no); + + if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) + ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; + + adev = ACPI_COMPANION(&ap->tdev); + if (!adev || adev->hp) + return; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return; + + context->data.ap = ap; + acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock, + ata_acpi_ap_uevent); +} + +void ata_acpi_bind_dev(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev); + struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev); + struct acpi_device *parent, *adev; + struct ata_acpi_hotplug_context *context; + u64 adr; + + /* + * For both sata/pata devices, host companion device is required. + * For pata device, port companion device is also required. 
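+ *
+ * The ACPI addresses used below follow from this split: a SATA device
+ * uses SATA_ADR(), e.g. SATA_ADR(1, NO_PORT_MULT) = 0x1ffff for a
+ * bare port 1 and SATA_ADR(1, 2) = 0x10002 behind fan-out port 2 of
+ * a port multiplier, while a PATA device uses plain dev->devno
+ * (0 = master, 1 = slave).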
+ */ + if (libata_noacpi || !host_companion || + (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion)) + return; + + if (ap->flags & ATA_FLAG_ACPI_SATA) { + if (!sata_pmp_attached(ap)) + adr = SATA_ADR(ap->port_no, NO_PORT_MULT); + else + adr = SATA_ADR(ap->port_no, dev->link->pmp); + parent = host_companion; + } else { + adr = dev->devno; + parent = port_companion; + } + + acpi_preset_companion(&dev->tdev, parent, adr); + adev = ACPI_COMPANION(&dev->tdev); + if (!adev || adev->hp) + return; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return; + + context->data.dev = dev; + acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock, + ata_acpi_dev_uevent); +} + +/** + * ata_acpi_dissociate - dissociate ATA host from ACPI objects + * @host: target ATA host + * + * This function is called during driver detach after the whole host + * is shut down. + * + * LOCKING: + * EH context. + */ +void ata_acpi_dissociate(struct ata_host *host) +{ + int i; + + /* Restore initial _GTM values so that driver which attaches + * afterward can use them too. + */ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); + + if (ACPI_HANDLE(&ap->tdev) && gtm) + ata_acpi_stm(ap, gtm); + } +} + +/** + * ata_acpi_gtm - execute _GTM + * @ap: target ATA port + * @gtm: out parameter for _GTM result + * + * Evaluate _GTM and store the result in @gtm. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. + */ +int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm) +{ + struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; + union acpi_object *out_obj; + acpi_status status; + int rc = 0; + acpi_handle handle = ACPI_HANDLE(&ap->tdev); + + if (!handle) + return -EINVAL; + + status = acpi_evaluate_object(handle, "_GTM", NULL, &output); + + rc = -ENOENT; + if (status == AE_NOT_FOUND) + goto out_free; + + rc = -EINVAL; + if (ACPI_FAILURE(status)) { + ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n", + status); + goto out_free; + } + + out_obj = output.pointer; + if (out_obj->type != ACPI_TYPE_BUFFER) { + ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n", + out_obj->type); + + goto out_free; + } + + if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) { + ata_port_err(ap, "_GTM returned invalid length %d\n", + out_obj->buffer.length); + goto out_free; + } + + memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm)); + rc = 0; + out_free: + kfree(output.pointer); + return rc; +} + +EXPORT_SYMBOL_GPL(ata_acpi_gtm); + +/** + * ata_acpi_stm - execute _STM + * @ap: target ATA port + * @stm: timing parameter to _STM + * + * Evaluate _STM with timing parameter @stm. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure. + */ +int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm) +{ + acpi_status status; + struct ata_acpi_gtm stm_buf = *stm; + struct acpi_object_list input; + union acpi_object in_params[3]; + + in_params[0].type = ACPI_TYPE_BUFFER; + in_params[0].buffer.length = sizeof(struct ata_acpi_gtm); + in_params[0].buffer.pointer = (u8 *)&stm_buf; + /* Buffers for id may need byteswapping ? 
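(note ata_acpi_push_id() below byte-swaps dev->id with swap_buf_le16() around its _SDD call)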
*/ + in_params[1].type = ACPI_TYPE_BUFFER; + in_params[1].buffer.length = 512; + in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id; + in_params[2].type = ACPI_TYPE_BUFFER; + in_params[2].buffer.length = 512; + in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id; + + input.count = 3; + input.pointer = in_params; + + status = acpi_evaluate_object(ACPI_HANDLE(&ap->tdev), "_STM", + &input, NULL); + + if (status == AE_NOT_FOUND) + return -ENOENT; + if (ACPI_FAILURE(status)) { + ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n", + status); + return -EINVAL; + } + return 0; +} + +EXPORT_SYMBOL_GPL(ata_acpi_stm); + +/** + * ata_dev_get_GTF - get the drive bootup default taskfile settings + * @dev: target ATA device + * @gtf: output parameter for buffer containing _GTF taskfile arrays + * + * This applies to both PATA and SATA drives. + * + * The _GTF method has no input parameters. + * It returns a variable number of register set values (registers + * hex 1F1..1F7, taskfiles). + * The <variable number> is not known in advance, so have ACPI-CA + * allocate the buffer as needed and return it, then free it later. + * + * LOCKING: + * EH context. + * + * RETURNS: + * Number of taskfiles on success, 0 if _GTF doesn't exist. -EINVAL + * if _GTF is invalid. + */ +static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) +{ + struct ata_port *ap = dev->link->ap; + acpi_status status; + struct acpi_buffer output; + union acpi_object *out_obj; + int rc = 0; + + /* if _GTF is cached, use the cached value */ + if (dev->gtf_cache) { + out_obj = dev->gtf_cache; + goto done; + } + + /* set up output buffer */ + output.length = ACPI_ALLOCATE_BUFFER; + output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ + + if (ata_msg_probe(ap)) + ata_dev_dbg(dev, "%s: ENTER: port#: %d\n", + __func__, ap->port_no); + + /* _GTF has no input parameters */ + status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL, + &output); + out_obj = dev->gtf_cache = output.pointer; + + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) { + ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n", + status); + rc = -EINVAL; + } + goto out_free; + } + + if (!output.length || !output.pointer) { + if (ata_msg_probe(ap)) + ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n", + __func__, + (unsigned long long)output.length, + output.pointer); + rc = -EINVAL; + goto out_free; + } + + if (out_obj->type != ACPI_TYPE_BUFFER) { + ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n", + out_obj->type); + rc = -EINVAL; + goto out_free; + } + + if (out_obj->buffer.length % REGS_PER_GTF) { + ata_dev_warn(dev, "unexpected _GTF length (%d)\n", + out_obj->buffer.length); + rc = -EINVAL; + goto out_free; + } + + done: + rc = out_obj->buffer.length / REGS_PER_GTF; + if (gtf) { + *gtf = (void *)out_obj->buffer.pointer; + if (ata_msg_probe(ap)) + ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n", + __func__, *gtf, rc); + } + return rc; + + out_free: + ata_acpi_clear_gtf(dev); + return rc; +} + +/** + * ata_acpi_gtm_xfermode - determine xfermode from GTM parameter + * @dev: target device + * @gtm: GTM parameter to use + * + * Determine xfermask for @dev from @gtm. + * + * LOCKING: + * None. + * + * RETURNS: + * Determined xfermask. 
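+ *
+ * The mask is assembled from the cycle times in @gtm: the PIO entry
+ * goes through ata_timing_cycle2mode(ATA_SHIFT_PIO, ...), and the
+ * DMA entry is decoded as UDMA when the per-drive flag bit
+ * (1 << (2 * unit)) is set in gtm->flags, as MWDMA otherwise.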
+ */ +unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm) +{ + unsigned long xfer_mask = 0; + unsigned int type; + int unit; + u8 mode; + + /* we always use the 0 slot for crap hardware */ + unit = dev->devno; + if (!(gtm->flags & 0x10)) + unit = 0; + + /* PIO */ + mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio); + xfer_mask |= ata_xfer_mode2mask(mode); + + /* See if we have MWDMA or UDMA data. We don't bother with + * MWDMA if UDMA is available as this means the BIOS set UDMA + * and our error changedown if it works is UDMA to PIO anyway. + */ + if (!(gtm->flags & (1 << (2 * unit)))) + type = ATA_SHIFT_MWDMA; + else + type = ATA_SHIFT_UDMA; + + mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma); + xfer_mask |= ata_xfer_mode2mask(mode); + + return xfer_mask; +} +EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); + +/** + * ata_acpi_cbl_80wire - Check for 80 wire cable + * @ap: Port to check + * @gtm: GTM data to use + * + * Return 1 if the @gtm indicates the BIOS selected an 80wire mode. + */ +int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) +{ + struct ata_device *dev; + + ata_for_each_dev(dev, &ap->link, ENABLED) { + unsigned long xfer_mask, udma_mask; + + xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); + ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); + + if (udma_mask & ~ATA_UDMA_MASK_40C) + return 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); + +static void ata_acpi_gtf_to_tf(struct ata_device *dev, + const struct ata_acpi_gtf *gtf, + struct ata_taskfile *tf) +{ + ata_tf_init(dev, tf); + + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->protocol = ATA_PROT_NODATA; + tf->feature = gtf->tf[0]; /* 0x1f1 */ + tf->nsect = gtf->tf[1]; /* 0x1f2 */ + tf->lbal = gtf->tf[2]; /* 0x1f3 */ + tf->lbam = gtf->tf[3]; /* 0x1f4 */ + tf->lbah = gtf->tf[4]; /* 0x1f5 */ + tf->device = gtf->tf[5]; /* 0x1f6 */ + tf->command = gtf->tf[6]; /* 0x1f7 */ +} + +static int ata_acpi_filter_tf(struct ata_device *dev, + const struct ata_taskfile *tf, + const struct ata_taskfile *ptf) +{ + if (dev->gtf_filter & ATA_ACPI_FILTER_SETXFER) { + /* libata doesn't use ACPI to configure transfer mode. + * It will only confuse device configuration. Skip. + */ + if (tf->command == ATA_CMD_SET_FEATURES && + tf->feature == SETFEATURES_XFER) + return 1; + } + + if (dev->gtf_filter & ATA_ACPI_FILTER_LOCK) { + /* BIOS writers, sorry but we don't wanna lock + * features unless the user explicitly said so. 
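+ * (This is the lock/freeze-lock filter class, i.e. the 0x2 bit of
+ * the acpi_gtf_filter module parameter described at the top of this
+ * file.)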
+ */ + + /* DEVICE CONFIGURATION FREEZE LOCK */ + if (tf->command == ATA_CMD_CONF_OVERLAY && + tf->feature == ATA_DCO_FREEZE_LOCK) + return 1; + + /* SECURITY FREEZE LOCK */ + if (tf->command == ATA_CMD_SEC_FREEZE_LOCK) + return 1; + + /* SET MAX LOCK and SET MAX FREEZE LOCK */ + if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) && + tf->command == ATA_CMD_SET_MAX && + (tf->feature == ATA_SET_MAX_LOCK || + tf->feature == ATA_SET_MAX_FREEZE_LOCK)) + return 1; + } + + if (tf->command == ATA_CMD_SET_FEATURES && + tf->feature == SETFEATURES_SATA_ENABLE) { + /* inhibit enabling DIPM */ + if (dev->gtf_filter & ATA_ACPI_FILTER_DIPM && + tf->nsect == SATA_DIPM) + return 1; + + /* inhibit FPDMA non-zero offset */ + if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_OFFSET && + (tf->nsect == SATA_FPDMA_OFFSET || + tf->nsect == SATA_FPDMA_IN_ORDER)) + return 1; + + /* inhibit FPDMA auto activation */ + if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_AA && + tf->nsect == SATA_FPDMA_AA) + return 1; + } + + return 0; +} + +/** + * ata_acpi_run_tf - send taskfile registers to host controller + * @dev: target ATA device + * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) + * + * Outputs ATA taskfile to standard ATA host controller. + * Writes the control, feature, nsect, lbal, lbam, and lbah registers. + * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, + * hob_lbal, hob_lbam, and hob_lbah. + * + * This function waits for idle (!BUSY and !DRQ) after writing + * registers. If the control register has a new value, this + * function also waits for idle after writing control and before + * writing the remaining registers. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 1 if command is executed successfully. 0 if ignored, rejected or + * filtered out, -errno on other errors. + */ +static int ata_acpi_run_tf(struct ata_device *dev, + const struct ata_acpi_gtf *gtf, + const struct ata_acpi_gtf *prev_gtf) +{ + struct ata_taskfile *pptf = NULL; + struct ata_taskfile tf, ptf, rtf; + unsigned int err_mask; + const char *level; + const char *descr; + char msg[60]; + int rc; + + if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) + && (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0) + && (gtf->tf[6] == 0)) + return 0; + + ata_acpi_gtf_to_tf(dev, gtf, &tf); + if (prev_gtf) { + ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf); + pptf = &ptf; + } + + if (!ata_acpi_filter_tf(dev, &tf, pptf)) { + rtf = tf; + err_mask = ata_exec_internal(dev, &rtf, NULL, + DMA_NONE, NULL, 0, 0); + + switch (err_mask) { + case 0: + level = KERN_DEBUG; + snprintf(msg, sizeof(msg), "succeeded"); + rc = 1; + break; + + case AC_ERR_DEV: + level = KERN_INFO; + snprintf(msg, sizeof(msg), + "rejected by device (Stat=0x%02x Err=0x%02x)", + rtf.command, rtf.feature); + rc = 0; + break; + + default: + level = KERN_ERR; + snprintf(msg, sizeof(msg), + "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", + err_mask, rtf.command, rtf.feature); + rc = -EIO; + break; + } + } else { + level = KERN_INFO; + snprintf(msg, sizeof(msg), "filtered out"); + rc = 0; + } + descr = ata_get_cmd_descript(tf.command); + + ata_dev_printk(dev, level, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, + (descr ? 
descr : "unknown"), msg); + + return rc; +} + +/** + * ata_acpi_exec_tfs - get then write drive taskfile settings + * @dev: target ATA device + * @nr_executed: out parameter for the number of executed commands + * + * Evaluate _GTF and execute returned taskfiles. + * + * LOCKING: + * EH context. + * + * RETURNS: + * Number of executed taskfiles on success, 0 if _GTF doesn't exist. + * -errno on other errors. + */ +static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed) +{ + struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL; + int gtf_count, i, rc; + + /* get taskfiles */ + rc = ata_dev_get_GTF(dev, >f); + if (rc < 0) + return rc; + gtf_count = rc; + + /* execute them */ + for (i = 0; i < gtf_count; i++, gtf++) { + rc = ata_acpi_run_tf(dev, gtf, pgtf); + if (rc < 0) + break; + if (rc) { + (*nr_executed)++; + pgtf = gtf; + } + } + + ata_acpi_clear_gtf(dev); + + if (rc < 0) + return rc; + return 0; +} + +/** + * ata_acpi_push_id - send Identify data to drive + * @dev: target ATA device + * + * _SDD ACPI object: for SATA mode only + * Must be after Identify (Packet) Device -- uses its data + * ATM this function never returns a failure. It is an optional + * method and if it fails for whatever reason, we should still + * just keep going. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -ENOENT if _SDD doesn't exist, -errno on failure. + */ +static int ata_acpi_push_id(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + acpi_status status; + struct acpi_object_list input; + union acpi_object in_params[1]; + + if (ata_msg_probe(ap)) + ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n", + __func__, dev->devno, ap->port_no); + + /* Give the drive Identify data to the drive via the _SDD method */ + /* _SDD: set up input parameters */ + input.count = 1; + input.pointer = in_params; + in_params[0].type = ACPI_TYPE_BUFFER; + in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS; + in_params[0].buffer.pointer = (u8 *)dev->id; + /* Output buffer: _SDD has no output */ + + /* It's OK for _SDD to be missing too. */ + swap_buf_le16(dev->id, ATA_ID_WORDS); + status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_SDD", &input, + NULL); + swap_buf_le16(dev->id, ATA_ID_WORDS); + + if (status == AE_NOT_FOUND) + return -ENOENT; + + if (ACPI_FAILURE(status)) { + ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status); + return -EIO; + } + + return 0; +} + +/** + * ata_acpi_on_suspend - ATA ACPI hook called on suspend + * @ap: target ATA port + * + * This function is called when @ap is about to be suspended. All + * devices are already put to sleep but the port_suspend() callback + * hasn't been executed yet. Error return from this function aborts + * suspend. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int ata_acpi_on_suspend(struct ata_port *ap) +{ + /* nada */ + return 0; +} + +/** + * ata_acpi_on_resume - ATA ACPI hook called on resume + * @ap: target ATA port + * + * This function is called when @ap is resumed - right after port + * itself is resumed but before any EH action is taken. + * + * LOCKING: + * EH context. + */ +void ata_acpi_on_resume(struct ata_port *ap) +{ + const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); + struct ata_device *dev; + + if (ACPI_HANDLE(&ap->tdev) && gtm) { + /* _GTM valid */ + + /* restore timing parameters */ + ata_acpi_stm(ap, gtm); + + /* _GTF should immediately follow _STM so that it can + * use values set by _STM. Cache _GTF result and + * schedule _GTF. 
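+ * (The cached taskfiles are executed later from
+ * ata_acpi_on_devcfg() once EH revalidates the device; this hook
+ * only primes the cache and marks the device ACPI-pending.)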
+ */
+ ata_for_each_dev(dev, &ap->link, ALL) {
+ ata_acpi_clear_gtf(dev);
+ if (ata_dev_enabled(dev) &&
+ ata_dev_acpi_handle(dev) &&
+ ata_dev_get_GTF(dev, NULL) >= 0)
+ dev->flags |= ATA_DFLAG_ACPI_PENDING;
+ }
+ } else {
+ /* SATA _GTF needs to be evaluated after _SDD and
+ * there's no reason to evaluate IDE _GTF early
+ * without _STM. Clear cache and schedule _GTF.
+ */
+ ata_for_each_dev(dev, &ap->link, ALL) {
+ ata_acpi_clear_gtf(dev);
+ if (ata_dev_enabled(dev))
+ dev->flags |= ATA_DFLAG_ACPI_PENDING;
+ }
+ }
+}
+
+static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
+{
+ int d_max_in = ACPI_STATE_D3_COLD;
+ if (!runtime)
+ goto out;
+
+ /*
+ * For ATAPI, runtime D3 cold is only allowed
+ * for ZPODD in zero power ready state
+ */
+ if (dev->class == ATA_DEV_ATAPI &&
+ !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
+ d_max_in = ACPI_STATE_D3_HOT;
+
+out:
+ return acpi_pm_device_sleep_state(&dev->tdev, NULL, d_max_in);
+}
+
+static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ bool runtime = PMSG_IS_AUTO(state);
+ struct ata_device *dev;
+ acpi_handle handle;
+ int acpi_state;
+
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ handle = ata_dev_acpi_handle(dev);
+ if (!handle)
+ continue;
+
+ if (!(state.event & PM_EVENT_RESUME)) {
+ acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
+ if (acpi_state == ACPI_STATE_D0)
+ continue;
+ if (runtime && zpodd_dev_enabled(dev) &&
+ acpi_state == ACPI_STATE_D3_COLD)
+ zpodd_enable_run_wake(dev);
+ acpi_bus_set_power(handle, acpi_state);
+ } else {
+ if (runtime && zpodd_dev_enabled(dev))
+ zpodd_disable_run_wake(dev);
+ acpi_bus_set_power(handle, ACPI_STATE_D0);
+ }
+ }
+}
+
+/* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */
+static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ struct ata_device *dev;
+ acpi_handle port_handle;
+
+ port_handle = ACPI_HANDLE(&ap->tdev);
+ if (!port_handle)
+ return;
+
+ /* channel first and then drives for power on and vice versa
+ for power off */
+ if (state.event & PM_EVENT_RESUME)
+ acpi_bus_set_power(port_handle, ACPI_STATE_D0);
+
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ acpi_handle dev_handle = ata_dev_acpi_handle(dev);
+ if (!dev_handle)
+ continue;
+
+ acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
+ ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
+ }
+
+ if (!(state.event & PM_EVENT_RESUME))
+ acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function sets a proper ACPI D state for the device on
+ * system and runtime PM operations.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ if (ap->flags & ATA_FLAG_ACPI_SATA)
+ sata_acpi_set_state(ap, state);
+ else
+ pata_acpi_set_state(ap, state);
+}
+
+/**
+ * ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be configured.
+ * IDENTIFY data might have been modified after this hook is run.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Positive number if IDENTIFY data needs to be refreshed, 0 if not,
+ * -errno on failure.
+ */ +int ata_acpi_on_devcfg(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + struct ata_eh_context *ehc = &ap->link.eh_context; + int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; + int nr_executed = 0; + int rc; + + if (!ata_dev_acpi_handle(dev)) + return 0; + + /* do we need to do _GTF? */ + if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) && + !(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET))) + return 0; + + /* do _SDD if SATA */ + if (acpi_sata) { + rc = ata_acpi_push_id(dev); + if (rc && rc != -ENOENT) + goto acpi_err; + } + + /* do _GTF */ + rc = ata_acpi_exec_tfs(dev, &nr_executed); + if (rc) + goto acpi_err; + + dev->flags &= ~ATA_DFLAG_ACPI_PENDING; + + /* refresh IDENTIFY page if any _GTF command has been executed */ + if (nr_executed) { + rc = ata_dev_reread_id(dev, 0); + if (rc < 0) { + ata_dev_err(dev, + "failed to IDENTIFY after ACPI commands\n"); + return rc; + } + } + + return 0; + + acpi_err: + /* ignore evaluation failure if we can continue safely */ + if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) + return 0; + + /* fail and let EH retry once more for unknown IO errors */ + if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) { + dev->flags |= ATA_DFLAG_ACPI_FAILED; + return rc; + } + + dev->flags |= ATA_DFLAG_ACPI_DISABLED; + ata_dev_warn(dev, "ACPI: failed the second time, disabled\n"); + + /* We can safely continue if no _GTF command has been executed + * and port is not frozen. + */ + if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) + return 0; + + return rc; +} + +/** + * ata_acpi_on_disable - ATA ACPI hook called when a device is disabled + * @dev: target ATA device + * + * This function is called when @dev is about to be disabled. + * + * LOCKING: + * EH context. + */ +void ata_acpi_on_disable(struct ata_device *dev) +{ + ata_acpi_clear_gtf(dev); +} diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b4abd685036..677c0c1b03b 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1,7 +1,7 @@ /* * libata-core.c - helper library for ATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* @@ -30,6 +30,14 @@ * Hardware documentation available from http://www.t13.org/ and * http://www.sata-io.org/ * + * Standards documents from: + * http://www.t13.org (ATA standards, PCI DMA IDE spec) + * http://www.t10.org (SCSI MMC - for ATAPI MMC) + * http://www.sata-io.org (SATA) + * http://www.compactflash.org (CF) + * http://www.qic.org (QIC157 - Tape and DSC) + * http://www.ce-ata.org (CE-ATA: not supported) + * */ #include <linux/kernel.h> @@ -38,7 +46,6 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/mm.h> -#include <linux/highmem.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/delay.h> @@ -47,60 +54,489 @@ #include <linux/completion.h> #include <linux/suspend.h> #include <linux/workqueue.h> -#include <linux/jiffies.h> #include <linux/scatterlist.h> +#include <linux/io.h> +#include <linux/async.h> +#include <linux/log2.h> +#include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <linux/libata.h> -#include <asm/io.h> -#include <asm/semaphore.h> #include <asm/byteorder.h> +#include <linux/cdrom.h> +#include <linux/ratelimit.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h> #include "libata.h" +#include "libata-transport.h" /* debounce timing parameters in msecs { interval, duration, timeout } */ const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; +const struct ata_port_operations ata_base_port_ops = { + .prereset = ata_std_prereset, + .postreset = ata_std_postreset, + .error_handler = ata_std_error_handler, + .sched_eh = ata_std_sched_eh, + .end_eh = ata_std_end_eh, +}; + +const struct ata_port_operations sata_port_ops = { + .inherits = &ata_base_port_ops, + + .qc_defer = ata_std_qc_defer, + .hardreset = sata_std_hardreset, +}; + static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev); +static unsigned long ata_dev_blacklisted(const struct ata_device *dev); + +atomic_t ata_print_id = ATOMIC_INIT(0); + +struct ata_force_param { + const char *name; + unsigned int cbl; + int spd_limit; + unsigned long xfer_mask; + unsigned int horkage_on; + unsigned int horkage_off; + unsigned int lflags; +}; -static unsigned int ata_unique_id = 1; -static struct workqueue_struct *ata_wq; +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; -struct workqueue_struct *ata_aux_wq; +static struct ata_force_ent *ata_force_tbl; +static int ata_force_tbl_size; -int atapi_enabled = 1; +static char ata_force_param_buf[PAGE_SIZE] __initdata; +/* param_buf is thrown away after initialization, disallow read */ +module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); +MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); + +static int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); -MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); +MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])"); -int atapi_dmadir = 0; +static int atapi_dmadir = 0; module_param(atapi_dmadir, int, 0444); -MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
+
+int atapi_passthru16 = 1;
+module_param(atapi_passthru16, int, 0444);
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
 
 int libata_fua = 0;
 module_param_named(fua, libata_fua, int, 0444);
-MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
 
-static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
+static int ata_ignore_hpa;
+module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
+MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
+
+static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
+module_param_named(dma, libata_dma_mask, int, 0444);
+MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
+
+static int ata_probe_timeout;
 module_param(ata_probe_timeout, int, 0444);
 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 
+int libata_noacpi = 0;
+module_param_named(noacpi, libata_noacpi, int, 0444);
+MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
+
+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static bool ata_sstatus_online(u32 sstatus)
+{
+ return (sstatus & 0xf) == 0x3;
+}
+
+/**
+ * ata_link_next - link iteration helper
+ * @link: the previous link, NULL to start
+ * @ap: ATA port containing links to iterate
+ * @mode: iteration mode, one of ATA_LITER_*
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ *
+ * RETURNS:
+ * Pointer to the next link.
+ */
+struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
+ enum ata_link_iter_mode mode)
+{
+ BUG_ON(mode != ATA_LITER_EDGE &&
+ mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
+
+ /* NULL link indicates start of iteration */
+ if (!link)
+ switch (mode) {
+ case ATA_LITER_EDGE:
+ case ATA_LITER_PMP_FIRST:
+ if (sata_pmp_attached(ap))
+ return ap->pmp_link;
+ /* fall through */
+ case ATA_LITER_HOST_FIRST:
+ return &ap->link;
+ }
+
+ /* we just iterated over the host link, what's next? */
+ if (link == &ap->link)
+ switch (mode) {
+ case ATA_LITER_HOST_FIRST:
+ if (sata_pmp_attached(ap))
+ return ap->pmp_link;
+ /* fall through */
+ case ATA_LITER_PMP_FIRST:
+ if (unlikely(ap->slave_link))
+ return ap->slave_link;
+ /* fall through */
+ case ATA_LITER_EDGE:
+ return NULL;
+ }
+
+ /* slave_link excludes PMP */
+ if (unlikely(link == ap->slave_link))
+ return NULL;
+
+ /* we were over a PMP link */
+ if (++link < ap->pmp_link + ap->nr_pmp_links)
+ return link;
+
+ if (mode == ATA_LITER_PMP_FIRST)
+ return &ap->link;
+
+ return NULL;
+}
+
+/**
+ * ata_dev_next - device iteration helper
+ * @dev: the previous device, NULL to start
+ * @link: ATA link containing devices to iterate
+ * @mode: iteration mode, one of ATA_DITER_*
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ *
+ * RETURNS:
+ * Pointer to the next device.
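+ *
+ * Like ata_link_next() this is normally used through its iterator
+ * wrapper rather than directly, e.g. (a typical sketch):
+ *
+ *	ata_for_each_dev(dev, link, ENABLED)
+ *		dev->flags |= ATA_DFLAG_DETACH;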
+ */ +struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, + enum ata_dev_iter_mode mode) +{ + BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && + mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); + + /* NULL dev indicates start of iteration */ + if (!dev) + switch (mode) { + case ATA_DITER_ENABLED: + case ATA_DITER_ALL: + dev = link->device; + goto check; + case ATA_DITER_ENABLED_REVERSE: + case ATA_DITER_ALL_REVERSE: + dev = link->device + ata_link_max_devices(link) - 1; + goto check; + } + + next: + /* move to the next one */ + switch (mode) { + case ATA_DITER_ENABLED: + case ATA_DITER_ALL: + if (++dev < link->device + ata_link_max_devices(link)) + goto check; + return NULL; + case ATA_DITER_ENABLED_REVERSE: + case ATA_DITER_ALL_REVERSE: + if (--dev >= link->device) + goto check; + return NULL; + } + + check: + if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && + !ata_dev_enabled(dev)) + goto next; + return dev; +} + +/** + * ata_dev_phys_link - find physical link for a device + * @dev: ATA device to look up physical link for + * + * Look up physical link which @dev is attached to. Note that + * this is different from @dev->link only when @dev is on slave + * link. For all other cases, it's the same as @dev->link. + * + * LOCKING: + * Don't care. + * + * RETURNS: + * Pointer to the found physical link. + */ +struct ata_link *ata_dev_phys_link(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + + if (!ap->slave_link) + return dev->link; + if (!dev->devno) + return &ap->link; + return ap->slave_link; +} + +/** + * ata_force_cbl - force cable type according to libata.force + * @ap: ATA port of interest + * + * Force cable type according to libata.force and whine about it. + * The last entry which has matching port number is used, so it + * can be specified as part of device force parameters. For + * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the + * same effect. + * + * LOCKING: + * EH context. + */ +void ata_force_cbl(struct ata_port *ap) +{ + int i; + + for (i = ata_force_tbl_size - 1; i >= 0; i--) { + const struct ata_force_ent *fe = &ata_force_tbl[i]; + + if (fe->port != -1 && fe->port != ap->print_id) + continue; + + if (fe->param.cbl == ATA_CBL_NONE) + continue; + + ap->cbl = fe->param.cbl; + ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); + return; + } +} + +/** + * ata_force_link_limits - force link limits according to libata.force + * @link: ATA link of interest + * + * Force link flags and SATA spd limit according to libata.force + * and whine about it. When only the port part is specified + * (e.g. 1:), the limit applies to all links connected to both + * the host link and all fan-out ports connected via PMP. If the + * device part is specified as 0 (e.g. 1.00:), it specifies the + * first fan-out link not the host link. Device number 15 always + * points to the host link whether PMP is attached or not. If the + * controller has slave link, device number 16 points to it. + * + * LOCKING: + * EH context. 
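+ *
+ * For example (parameter names as listed in
+ * Documentation/kernel-parameters.txt): libata.force=1:1.5Gbps caps
+ * every link hanging off port 1, while libata.force=1.00:1.5Gbps
+ * caps only the first fan-out link behind a port multiplier.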
+ */ +static void ata_force_link_limits(struct ata_link *link) +{ + bool did_spd = false; + int linkno = link->pmp; + int i; + + if (ata_is_host_link(link)) + linkno += 15; + + for (i = ata_force_tbl_size - 1; i >= 0; i--) { + const struct ata_force_ent *fe = &ata_force_tbl[i]; + + if (fe->port != -1 && fe->port != link->ap->print_id) + continue; + + if (fe->device != -1 && fe->device != linkno) + continue; + + /* only honor the first spd limit */ + if (!did_spd && fe->param.spd_limit) { + link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; + ata_link_notice(link, "FORCE: PHY spd limit set to %s\n", + fe->param.name); + did_spd = true; + } + + /* let lflags stack */ + if (fe->param.lflags) { + link->flags |= fe->param.lflags; + ata_link_notice(link, + "FORCE: link flag 0x%x forced -> 0x%x\n", + fe->param.lflags, link->flags); + } + } +} + +/** + * ata_force_xfermask - force xfermask according to libata.force + * @dev: ATA device of interest + * + * Force xfer_mask according to libata.force and whine about it. + * For consistency with link selection, device number 15 selects + * the first device connected to the host link. + * + * LOCKING: + * EH context. + */ +static void ata_force_xfermask(struct ata_device *dev) +{ + int devno = dev->link->pmp + dev->devno; + int alt_devno = devno; + int i; + + /* allow n.15/16 for devices attached to host port */ + if (ata_is_host_link(dev->link)) + alt_devno += 15; + + for (i = ata_force_tbl_size - 1; i >= 0; i--) { + const struct ata_force_ent *fe = &ata_force_tbl[i]; + unsigned long pio_mask, mwdma_mask, udma_mask; + + if (fe->port != -1 && fe->port != dev->link->ap->print_id) + continue; + + if (fe->device != -1 && fe->device != devno && + fe->device != alt_devno) + continue; + + if (!fe->param.xfer_mask) + continue; + + ata_unpack_xfermask(fe->param.xfer_mask, + &pio_mask, &mwdma_mask, &udma_mask); + if (udma_mask) + dev->udma_mask = udma_mask; + else if (mwdma_mask) { + dev->udma_mask = 0; + dev->mwdma_mask = mwdma_mask; + } else { + dev->udma_mask = 0; + dev->mwdma_mask = 0; + dev->pio_mask = pio_mask; + } + + ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n", + fe->param.name); + return; + } +} + +/** + * ata_force_horkage - force horkage according to libata.force + * @dev: ATA device of interest + * + * Force horkage according to libata.force and whine about it. + * For consistency with link selection, device number 15 selects + * the first device connected to the host link. + * + * LOCKING: + * EH context. + */ +static void ata_force_horkage(struct ata_device *dev) +{ + int devno = dev->link->pmp + dev->devno; + int alt_devno = devno; + int i; + + /* allow n.15/16 for devices attached to host port */ + if (ata_is_host_link(dev->link)) + alt_devno += 15; + + for (i = 0; i < ata_force_tbl_size; i++) { + const struct ata_force_ent *fe = &ata_force_tbl[i]; + + if (fe->port != -1 && fe->port != dev->link->ap->print_id) + continue; + + if (fe->device != -1 && fe->device != devno && + fe->device != alt_devno) + continue; + + if (!(~dev->horkage & fe->param.horkage_on) && + !(dev->horkage & fe->param.horkage_off)) + continue; + + dev->horkage |= fe->param.horkage_on; + dev->horkage &= ~fe->param.horkage_off; + + ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", + fe->param.name); + } +} + +/** + * atapi_cmd_type - Determine ATAPI command type from SCSI opcode + * @opcode: SCSI opcode + * + * Determine ATAPI command type from @opcode. + * + * LOCKING: + * None. 
+ * + * RETURNS: + * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} + */ +int atapi_cmd_type(u8 opcode) +{ + switch (opcode) { + case GPCMD_READ_10: + case GPCMD_READ_12: + return ATAPI_READ; + + case GPCMD_WRITE_10: + case GPCMD_WRITE_12: + case GPCMD_WRITE_AND_VERIFY_10: + return ATAPI_WRITE; + + case GPCMD_READ_CD: + case GPCMD_READ_CD_MSF: + return ATAPI_READ_CD; + + case ATA_16: + case ATA_12: + if (atapi_passthru16) + return ATAPI_PASS_THRU; + /* fall thru */ + default: + return ATAPI_MISC; + } +} + /** * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure * @tf: Taskfile to convert - * @fis: Buffer into which data will output * @pmp: Port multiplier port + * @is_cmd: This FIS is for command + * @fis: Buffer into which data will output * * Converts a standard ATA taskfile to a Serial ATA * FIS structure (Register - Host to Device). @@ -108,12 +544,13 @@ MODULE_VERSION(DRV_VERSION); * LOCKING: * Inherited from caller. */ - -void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) +void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) { - fis[0] = 0x27; /* Register - Host to Device FIS */ - fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, - bit 7 indicates Command FIS */ + fis[0] = 0x27; /* Register - Host to Device FIS */ + fis[1] = pmp & 0xf; /* Port multiplier number*/ + if (is_cmd) + fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ + fis[2] = tf->command; fis[3] = tf->feature; @@ -132,10 +569,10 @@ void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) fis[14] = 0; fis[15] = tf->ctl; - fis[16] = 0; - fis[17] = 0; - fis[18] = 0; - fis[19] = 0; + fis[16] = tf->auxiliary & 0xff; + fis[17] = (tf->auxiliary >> 8) & 0xff; + fis[18] = (tf->auxiliary >> 16) & 0xff; + fis[19] = (tf->auxiliary >> 24) & 0xff; } /** @@ -199,7 +636,8 @@ static const u8 ata_rw_cmds[] = { /** * ata_rwcmd_protocol - set taskfile r/w commands and protocol - * @qc: command to examine and configure + * @tf: command to examine and configure + * @dev: device tf belongs to * * Examine the device configuration and tf->flags to calculate * the proper read/write commands and protocol to use. @@ -207,10 +645,8 @@ static const u8 ata_rw_cmds[] = { * LOCKING: * caller. */ -int ata_rwcmd_protocol(struct ata_queued_cmd *qc) +static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) { - struct ata_taskfile *tf = &qc->tf; - struct ata_device *dev = qc->dev; u8 cmd; int index, fua, lba48, write; @@ -222,7 +658,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) if (dev->flags & ATA_DFLAG_PIO) { tf->protocol = ATA_PROT_PIO; index = dev->multi_count ? 0 : 8; - } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { + } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { /* Unable to use DMA due to host limitation */ tf->protocol = ATA_PROT_PIO; index = dev->multi_count ? 0 : 8; @@ -240,6 +676,178 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) } /** + * ata_tf_read_block - Read block address from ATA taskfile + * @tf: ATA taskfile of interest + * @dev: ATA device @tf belongs to + * + * LOCKING: + * None. + * + * Read block address from @tf. This function can handle all + * three address formats - LBA, LBA48 and CHS. tf->protocol and + * flags select the address format to use. + * + * RETURNS: + * Block address read from @tf. 
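+ *
+ * For LBA48, for instance, the address is reassembled as
+ *
+ *	block = ((u64)hob_lbah << 40) | ((u64)hob_lbam << 32) |
+ *		((u64)hob_lbal << 24) | (lbah << 16) | (lbam << 8) | lbal;
+ *
+ * while plain LBA28 takes bits 27:24 from the low nibble of the
+ * device register instead of using the HOB bytes.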
+ */ +u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) +{ + u64 block = 0; + + if (tf->flags & ATA_TFLAG_LBA) { + if (tf->flags & ATA_TFLAG_LBA48) { + block |= (u64)tf->hob_lbah << 40; + block |= (u64)tf->hob_lbam << 32; + block |= (u64)tf->hob_lbal << 24; + } else + block |= (tf->device & 0xf) << 24; + + block |= tf->lbah << 16; + block |= tf->lbam << 8; + block |= tf->lbal; + } else { + u32 cyl, head, sect; + + cyl = tf->lbam | (tf->lbah << 8); + head = tf->device & 0xf; + sect = tf->lbal; + + if (!sect) { + ata_dev_warn(dev, + "device reported invalid CHS sector 0\n"); + sect = 1; /* oh well */ + } + + block = (cyl * dev->heads + head) * dev->sectors + sect - 1; + } + + return block; +} + +/** + * ata_build_rw_tf - Build ATA taskfile for given read/write request + * @tf: Target ATA taskfile + * @dev: ATA device @tf belongs to + * @block: Block address + * @n_block: Number of blocks + * @tf_flags: RW/FUA etc... + * @tag: tag + * + * LOCKING: + * None. + * + * Build ATA taskfile @tf for read/write request described by + * @block, @n_block, @tf_flags and @tag on @dev. + * + * RETURNS: + * + * 0 on success, -ERANGE if the request is too large for @dev, + * -EINVAL if the request is invalid. + */ +int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, + u64 block, u32 n_block, unsigned int tf_flags, + unsigned int tag) +{ + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags |= tf_flags; + + if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { + /* yay, NCQ */ + if (!lba_48_ok(block, n_block)) + return -ERANGE; + + tf->protocol = ATA_PROT_NCQ; + tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; + + if (tf->flags & ATA_TFLAG_WRITE) + tf->command = ATA_CMD_FPDMA_WRITE; + else + tf->command = ATA_CMD_FPDMA_READ; + + tf->nsect = tag << 3; + tf->hob_feature = (n_block >> 8) & 0xff; + tf->feature = n_block & 0xff; + + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + + tf->device = ATA_LBA; + if (tf->flags & ATA_TFLAG_FUA) + tf->device |= 1 << 7; + } else if (dev->flags & ATA_DFLAG_LBA) { + tf->flags |= ATA_TFLAG_LBA; + + if (lba_28_ok(block, n_block)) { + /* use LBA28 */ + tf->device |= (block >> 24) & 0xf; + } else if (lba_48_ok(block, n_block)) { + if (!(dev->flags & ATA_DFLAG_LBA48)) + return -ERANGE; + + /* use LBA48 */ + tf->flags |= ATA_TFLAG_LBA48; + + tf->hob_nsect = (n_block >> 8) & 0xff; + + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + } else + /* request too large even for LBA48 */ + return -ERANGE; + + if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) + return -EINVAL; + + tf->nsect = n_block & 0xff; + + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + + tf->device |= ATA_LBA; + } else { + /* CHS */ + u32 sect, head, cyl, track; + + /* The request -may- be too large for CHS addressing. */ + if (!lba_28_ok(block, n_block)) + return -ERANGE; + + if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) + return -EINVAL; + + /* Convert LBA to CHS */ + track = (u32)block / dev->sectors; + cyl = track / dev->heads; + head = track % dev->heads; + sect = (u32)block % dev->sectors + 1; + + DPRINTK("block %u track %u cyl %u head %u sect %u\n", + (u32)block, track, cyl, head, sect); + + /* Check whether the converted CHS can fit. 
+		   Cylinder: 0-65535
+		   Head: 0-15
+		   Sector: 1-255 */
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			return -ERANGE;
+
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+}
+
+/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
@@ -254,9 +862,9 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 *	RETURNS:
 *	Packed xfer_mask.
 */
-static unsigned int ata_pack_xfermask(unsigned int pio_mask,
-				      unsigned int mwdma_mask,
-				      unsigned int udma_mask)
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+				unsigned long mwdma_mask,
+				unsigned long udma_mask)
 {
 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
@@ -273,10 +881,8 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
-static void ata_unpack_xfermask(unsigned int xfer_mask,
-				unsigned int *pio_mask,
-				unsigned int *mwdma_mask,
-				unsigned int *udma_mask)
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 {
 	if (pio_mask)
 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
@@ -290,9 +896,9 @@ static const struct ata_xfer_ent {
 	int shift, bits;
 	u8 base;
 } ata_xfer_tbl[] = {
-	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
-	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
-	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
+	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 	{ -1, },
 };
@@ -307,9 +913,9 @@ static const struct ata_xfer_ent {
 *	None.
 *
 *	RETURNS:
- *	Matching XFER_* value, 0 if no match found.
+ *	Matching XFER_* value, 0xff if no match found.
 */
-static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 {
 	int highbit = fls(xfer_mask) - 1;
 	const struct ata_xfer_ent *ent;
@@ -317,7 +923,7 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 			return ent->base + highbit - ent->shift;
-	return 0;
+	return 0xff;
 }
 
 /**
@@ -332,13 +938,14 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
-static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 {
 	const struct ata_xfer_ent *ent;
 
 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
-			return 1 << (ent->shift + xfer_mode - ent->base);
+			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+				& ~((1 << ent->shift) - 1);
 	return 0;
 }
 
@@ -354,7 +961,7 @@ static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
-static int ata_xfer_mode2shift(unsigned int xfer_mode)
+int ata_xfer_mode2shift(unsigned long xfer_mode)
 {
 	const struct ata_xfer_ent *ent;
@@ -378,7 +985,7 @@ static int ata_xfer_mode2shift(unsigned int xfer_mode)
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
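 *	(For instance, a mask whose highest set bit is the XFER_PIO_4
 *	bit prints as "PIO4"; this simply restates the xfer_mode_str
 *	table lookup below.)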
*/ -static const char *ata_mode_string(unsigned int xfer_mask) +const char *ata_mode_string(unsigned long xfer_mask) { static const char * const xfer_mode_str[] = { "PIO0", @@ -410,11 +1017,12 @@ static const char *ata_mode_string(unsigned int xfer_mask) return "<n/a>"; } -static const char *sata_spd_string(unsigned int spd) +const char *sata_spd_string(unsigned int spd) { static const char * const spd_str[] = { "1.5 Gbps", "3.0 Gbps", + "6.0 Gbps", }; if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) @@ -422,123 +1030,6 @@ static const char *sata_spd_string(unsigned int spd) return spd_str[spd - 1]; } -void ata_dev_disable(struct ata_device *dev) -{ - if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) { - ata_dev_printk(dev, KERN_WARNING, "disabled\n"); - dev->class++; - } -} - -/** - * ata_pio_devchk - PATA device presence detection - * @ap: ATA channel to examine - * @device: Device to examine (starting at zero) - * - * This technique was originally described in - * Hale Landis's ATADRVR (www.ata-atapi.com), and - * later found its way into the ATA/ATAPI spec. - * - * Write a pattern to the ATA shadow registers, - * and if a device is present, it will respond by - * correctly storing and echoing back the - * ATA shadow register contents. - * - * LOCKING: - * caller. - */ - -static unsigned int ata_pio_devchk(struct ata_port *ap, - unsigned int device) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - u8 nsect, lbal; - - ap->ops->dev_select(ap, device); - - outb(0x55, ioaddr->nsect_addr); - outb(0xaa, ioaddr->lbal_addr); - - outb(0xaa, ioaddr->nsect_addr); - outb(0x55, ioaddr->lbal_addr); - - outb(0x55, ioaddr->nsect_addr); - outb(0xaa, ioaddr->lbal_addr); - - nsect = inb(ioaddr->nsect_addr); - lbal = inb(ioaddr->lbal_addr); - - if ((nsect == 0x55) && (lbal == 0xaa)) - return 1; /* we found a device */ - - return 0; /* nothing found */ -} - -/** - * ata_mmio_devchk - PATA device presence detection - * @ap: ATA channel to examine - * @device: Device to examine (starting at zero) - * - * This technique was originally described in - * Hale Landis's ATADRVR (www.ata-atapi.com), and - * later found its way into the ATA/ATAPI spec. - * - * Write a pattern to the ATA shadow registers, - * and if a device is present, it will respond by - * correctly storing and echoing back the - * ATA shadow register contents. - * - * LOCKING: - * caller. - */ - -static unsigned int ata_mmio_devchk(struct ata_port *ap, - unsigned int device) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - u8 nsect, lbal; - - ap->ops->dev_select(ap, device); - - writeb(0x55, (void __iomem *) ioaddr->nsect_addr); - writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); - - writeb(0xaa, (void __iomem *) ioaddr->nsect_addr); - writeb(0x55, (void __iomem *) ioaddr->lbal_addr); - - writeb(0x55, (void __iomem *) ioaddr->nsect_addr); - writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); - - nsect = readb((void __iomem *) ioaddr->nsect_addr); - lbal = readb((void __iomem *) ioaddr->lbal_addr); - - if ((nsect == 0x55) && (lbal == 0xaa)) - return 1; /* we found a device */ - - return 0; /* nothing found */ -} - -/** - * ata_devchk - PATA device presence detection - * @ap: ATA channel to examine - * @device: Device to examine (starting at zero) - * - * Dispatch ATA device presence detection, depending - * on whether we are using PIO or MMIO to talk to the - * ATA shadow registers. - * - * LOCKING: - * caller. 
- */
-
-static unsigned int ata_devchk(struct ata_port *ap,
-			       unsigned int device)
-{
-	if (ap->flags & ATA_FLAG_MMIO)
-		return ata_mmio_devchk(ap, device);
-	return ata_pio_devchk(ap, device);
-}
-
 /**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
@@ -551,90 +1042,54 @@ static unsigned int ata_devchk(struct ata_port *ap,
 *	None.
 *
 *	RETURNS:
- *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
- *	the event of failure.
+ *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
+ *	%ATA_DEV_UNKNOWN in the event of failure.
 */
-
 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
 {
 	/* Apple's open source Darwin code hints that some devices only
 	 * put a proper signature into the LBA mid/high registers,
 	 * So, we only check those.  It's sufficient for uniqueness.
+	 *
+	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
+	 * signatures for ATA and ATAPI devices attached on SerialATA,
+	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
+	 * spec never mentions using different signatures
+	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
+	 * Multiplier specification began to use 0x69/0x96 to identify
+	 * port multipliers and 0x3c/0xc3 to identify a SEMB device.
+	 * ATA/ATAPI-7 soon dropped its descriptions of 0x3c/0xc3 and
+	 * 0x69/0x96 and described them as reserved for
+	 * SerialATA.
+	 *
+	 * We follow the current spec and consider that 0x69/0x96
+	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
+	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
+	 * the SEMB signature.  This is worked around in
+	 * ata_dev_read_id().
	 */
-
-	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
-	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
+	if ((tf->lbam == 0) && (tf->lbah == 0)) {
 		DPRINTK("found ATA device by sig\n");
 		return ATA_DEV_ATA;
 	}
 
-	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
-	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
+	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
 		DPRINTK("found ATAPI device by sig\n");
 		return ATA_DEV_ATAPI;
 	}
 
-	DPRINTK("unknown device\n");
-	return ATA_DEV_UNKNOWN;
-}
-
-/**
- *	ata_dev_try_classify - Parse returned ATA device signature
- *	@ap: ATA channel to examine
- *	@device: Device to examine (starting at zero)
- *	@r_err: Value of error register on completion
- *
- *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
- *	an ATA/ATAPI-defined set of values is placed in the ATA
- *	shadow registers, indicating the results of device detection
- *	and diagnostics.
- *
- *	Select the ATA device, and read the values from the ATA shadow
- *	registers.  Then parse according to the Error register value,
- *	and the spec-defined values examined by ata_dev_classify().
- *
- *	LOCKING:
- *	caller.
- *
- *	RETURNS:
- *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
- */ - -static unsigned int -ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err) -{ - struct ata_taskfile tf; - unsigned int class; - u8 err; - - ap->ops->dev_select(ap, device); - - memset(&tf, 0, sizeof(tf)); - - ap->ops->tf_read(ap, &tf); - err = tf.feature; - if (r_err) - *r_err = err; - - /* see if device passed diags: if master then continue and warn later */ - if (err == 0 && device == 0) - /* diagnostic fail : do nothing _YET_ */ - ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC; - else if (err == 1) - /* do nothing */ ; - else if ((device == 0) && (err == 0x81)) - /* do nothing */ ; - else - return ATA_DEV_NONE; + if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { + DPRINTK("found PMP device by sig\n"); + return ATA_DEV_PMP; + } - /* determine if device is ATA or ATAPI */ - class = ata_dev_classify(&tf); + if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { + DPRINTK("found SEMB device by sig (could be ATA device)\n"); + return ATA_DEV_SEMB; + } - if (class == ATA_DEV_UNKNOWN) - return ATA_DEV_NONE; - if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) - return ATA_DEV_NONE; - return class; + DPRINTK("unknown device\n"); + return ATA_DEV_UNKNOWN; } /** @@ -657,6 +1112,8 @@ void ata_id_string(const u16 *id, unsigned char *s, { unsigned int c; + BUG_ON(len & 1); + while (len > 0) { c = id[ofs] >> 8; *s = c; @@ -690,8 +1147,6 @@ void ata_id_c_string(const u16 *id, unsigned char *s, { unsigned char *p; - WARN_ON(!(len & 1)); - ata_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); @@ -704,102 +1159,249 @@ static u64 ata_id_n_sectors(const u16 *id) { if (ata_id_has_lba(id)) { if (ata_id_has_lba48(id)) - return ata_id_u64(id, 100); + return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); else - return ata_id_u32(id, 60); + return ata_id_u32(id, ATA_ID_LBA_CAPACITY); } else { if (ata_id_current_chs_valid(id)) - return ata_id_u32(id, 57); + return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * + id[ATA_ID_CUR_SECTORS]; else - return id[1] * id[3] * id[6]; + return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * + id[ATA_ID_SECTORS]; } } +u64 ata_tf_to_lba48(const struct ata_taskfile *tf) +{ + u64 sectors = 0; + + sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; + sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; + sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; + sectors |= (tf->lbah & 0xff) << 16; + sectors |= (tf->lbam & 0xff) << 8; + sectors |= (tf->lbal & 0xff); + + return sectors; +} + +u64 ata_tf_to_lba(const struct ata_taskfile *tf) +{ + u64 sectors = 0; + + sectors |= (tf->device & 0x0f) << 24; + sectors |= (tf->lbah & 0xff) << 16; + sectors |= (tf->lbam & 0xff) << 8; + sectors |= (tf->lbal & 0xff); + + return sectors; +} + /** - * ata_noop_dev_select - Select device 0/1 on ATA bus - * @ap: ATA channel to manipulate - * @device: ATA device (numbered from zero) to select - * - * This function performs no actual function. + * ata_read_native_max_address - Read native max address + * @dev: target device + * @max_sectors: out parameter for the result native max address * - * May be used as the dev_select() entry in ata_port_operations. + * Perform an LBA48 or LBA28 native size query upon the device in + * question. * - * LOCKING: - * caller. + * RETURNS: + * 0 on success, -EACCES if command is aborted by the drive. + * -EIO on other errors. 
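+ *
+ *	Note: ata_hpa_resize() below treats -EACCES as "the drive
+ *	refused" and skips HPA handling rather than failing the whole
+ *	probe.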
*/ -void ata_noop_dev_select (struct ata_port *ap, unsigned int device) +static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) { -} + unsigned int err_mask; + struct ata_taskfile tf; + int lba48 = ata_id_has_lba48(dev->id); + + ata_tf_init(dev, &tf); + + /* always clear all address registers */ + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + if (lba48) { + tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; + tf.flags |= ATA_TFLAG_LBA48; + } else + tf.command = ATA_CMD_READ_NATIVE_MAX; + + tf.protocol |= ATA_PROT_NODATA; + tf.device |= ATA_LBA; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (err_mask) { + ata_dev_warn(dev, + "failed to read native max address (err_mask=0x%x)\n", + err_mask); + if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + return -EACCES; + return -EIO; + } + + if (lba48) + *max_sectors = ata_tf_to_lba48(&tf) + 1; + else + *max_sectors = ata_tf_to_lba(&tf) + 1; + if (dev->horkage & ATA_HORKAGE_HPA_SIZE) + (*max_sectors)--; + return 0; +} /** - * ata_std_dev_select - Select device 0/1 on ATA bus - * @ap: ATA channel to manipulate - * @device: ATA device (numbered from zero) to select - * - * Use the method defined in the ATA specification to - * make either device 0, or device 1, active on the - * ATA channel. Works with both PIO and MMIO. + * ata_set_max_sectors - Set max sectors + * @dev: target device + * @new_sectors: new max sectors value to set for the device * - * May be used as the dev_select() entry in ata_port_operations. + * Set max sectors of @dev to @new_sectors. * - * LOCKING: - * caller. + * RETURNS: + * 0 on success, -EACCES if command is aborted or denied (due to + * previous non-volatile SET_MAX) by the drive. -EIO on other + * errors. */ - -void ata_std_dev_select (struct ata_port *ap, unsigned int device) +static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) { - u8 tmp; + unsigned int err_mask; + struct ata_taskfile tf; + int lba48 = ata_id_has_lba48(dev->id); - if (device == 0) - tmp = ATA_DEVICE_OBS; - else - tmp = ATA_DEVICE_OBS | ATA_DEV1; + new_sectors--; - if (ap->flags & ATA_FLAG_MMIO) { - writeb(tmp, (void __iomem *) ap->ioaddr.device_addr); + ata_tf_init(dev, &tf); + + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + + if (lba48) { + tf.command = ATA_CMD_SET_MAX_EXT; + tf.flags |= ATA_TFLAG_LBA48; + + tf.hob_lbal = (new_sectors >> 24) & 0xff; + tf.hob_lbam = (new_sectors >> 32) & 0xff; + tf.hob_lbah = (new_sectors >> 40) & 0xff; } else { - outb(tmp, ap->ioaddr.device_addr); + tf.command = ATA_CMD_SET_MAX; + + tf.device |= (new_sectors >> 24) & 0xf; + } + + tf.protocol |= ATA_PROT_NODATA; + tf.device |= ATA_LBA; + + tf.lbal = (new_sectors >> 0) & 0xff; + tf.lbam = (new_sectors >> 8) & 0xff; + tf.lbah = (new_sectors >> 16) & 0xff; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (err_mask) { + ata_dev_warn(dev, + "failed to set max address (err_mask=0x%x)\n", + err_mask); + if (err_mask == AC_ERR_DEV && + (tf.feature & (ATA_ABORTED | ATA_IDNF))) + return -EACCES; + return -EIO; } - ata_pause(ap); /* needed; also flushes, for mmio */ + + return 0; } /** - * ata_dev_select - Select device 0/1 on ATA bus - * @ap: ATA channel to manipulate - * @device: ATA device (numbered from zero) to select - * @wait: non-zero to wait for Status register BSY bit to clear - * @can_sleep: non-zero if context allows sleeping - * - * Use the method defined in the ATA specification to - * make either device 0, or device 1, active on the - * ATA channel. 
+ * ata_hpa_resize - Resize a device with an HPA set + * @dev: Device to resize * - * This is a high-level version of ata_std_dev_select(), - * which additionally provides the services of inserting - * the proper pauses and status polling, where needed. + * Read the size of an LBA28 or LBA48 disk with HPA features and resize + * it if required to the full size of the media. The caller must check + * the drive has the HPA feature set enabled. * - * LOCKING: - * caller. + * RETURNS: + * 0 on success, -errno on failure. */ - -void ata_dev_select(struct ata_port *ap, unsigned int device, - unsigned int wait, unsigned int can_sleep) +static int ata_hpa_resize(struct ata_device *dev) { - if (ata_msg_probe(ap)) - ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: " - "device %u, wait %u\n", ap->id, device, wait); + struct ata_eh_context *ehc = &dev->link->eh_context; + int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; + bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; + u64 sectors = ata_id_n_sectors(dev->id); + u64 native_sectors; + int rc; + + /* do we need to do it? */ + if (dev->class != ATA_DEV_ATA || + !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || + (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) + return 0; + + /* read native max address */ + rc = ata_read_native_max_address(dev, &native_sectors); + if (rc) { + /* If device aborted the command or HPA isn't going to + * be unlocked, skip HPA resizing. + */ + if (rc == -EACCES || !unlock_hpa) { + ata_dev_warn(dev, + "HPA support seems broken, skipping HPA handling\n"); + dev->horkage |= ATA_HORKAGE_BROKEN_HPA; + + /* we can continue if device aborted the command */ + if (rc == -EACCES) + rc = 0; + } - if (wait) - ata_wait_idle(ap); + return rc; + } + dev->n_native_sectors = native_sectors; - ap->ops->dev_select(ap, device); + /* nothing to do? 
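+	   (i.e. no host protected area is hidden, or unlocking was not
+	   requested via ata_ignore_hpa or ATA_DFLAG_UNLOCK_HPA)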
*/ + if (native_sectors <= sectors || !unlock_hpa) { + if (!print_info || native_sectors == sectors) + return 0; + + if (native_sectors > sectors) + ata_dev_info(dev, + "HPA detected: current %llu, native %llu\n", + (unsigned long long)sectors, + (unsigned long long)native_sectors); + else if (native_sectors < sectors) + ata_dev_warn(dev, + "native sectors (%llu) is smaller than sectors (%llu)\n", + (unsigned long long)native_sectors, + (unsigned long long)sectors); + return 0; + } + + /* let's unlock HPA */ + rc = ata_set_max_sectors(dev, native_sectors); + if (rc == -EACCES) { + /* if device aborted the command, skip HPA resizing */ + ata_dev_warn(dev, + "device aborted resize (%llu -> %llu), skipping HPA handling\n", + (unsigned long long)sectors, + (unsigned long long)native_sectors); + dev->horkage |= ATA_HORKAGE_BROKEN_HPA; + return 0; + } else if (rc) + return rc; + + /* re-read IDENTIFY data */ + rc = ata_dev_reread_id(dev, 0); + if (rc) { + ata_dev_err(dev, + "failed to re-read IDENTIFY data after HPA resizing\n"); + return rc; + } - if (wait) { - if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) - msleep(150); - ata_wait_idle(ap); + if (print_info) { + u64 new_sectors = ata_id_n_sectors(dev->id); + ata_dev_info(dev, + "HPA unlocked: %llu -> %llu, native %llu\n", + (unsigned long long)sectors, + (unsigned long long)new_sectors, + (unsigned long long)native_sectors); } + + return 0; } /** @@ -856,9 +1458,9 @@ static inline void ata_dump_id(const u16 *id) * RETURNS: * Computed xfermask */ -static unsigned int ata_id_xfermask(const u16 *id) +unsigned long ata_id_xfermask(const u16 *id) { - unsigned int pio_mask, mwdma_mask, udma_mask; + unsigned long pio_mask, mwdma_mask, udma_mask; /* Usual case. Word 53 indicates word 64 is valid */ if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { @@ -870,7 +1472,11 @@ static unsigned int ata_id_xfermask(const u16 *id) * the PIO timing number for the maximum. Turn it into * a mask. */ - pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; + u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; + if (mode < 5) /* Valid PIO range */ + pio_mask = (2 << mode) - 1; + else + pio_mask = 1; /* But wait.. there's more. Design your standards by * committee and you too can get a free iordy field to @@ -886,8 +1492,8 @@ static unsigned int ata_id_xfermask(const u16 *id) /* * Process compact flash extended modes */ - int pio = id[163] & 0x7; - int dma = (id[163] >> 3) & 7; + int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; + int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; if (pio) pio_mask |= (1 << 5); @@ -906,88 +1512,7 @@ static unsigned int ata_id_xfermask(const u16 *id) return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); } -/** - * ata_port_queue_task - Queue port_task - * @ap: The ata_port to queue port_task for - * @fn: workqueue function to be scheduled - * @data: data value to pass to workqueue function - * @delay: delay time for workqueue function - * - * Schedule @fn(@data) for execution after @delay jiffies using - * port_task. There is one port_task per port and it's the - * user(low level driver)'s responsibility to make sure that only - * one task is active at any given time. - * - * libata core layer takes care of synchronization between - * port_task and EH. ata_port_queue_task() may be ignored for EH - * synchronization. - * - * LOCKING: - * Inherited from caller. 
- */ -void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, - unsigned long delay) -{ - int rc; - - if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) - return; - - PREPARE_WORK(&ap->port_task, fn, data); - - if (!delay) - rc = queue_work(ata_wq, &ap->port_task); - else - rc = queue_delayed_work(ata_wq, &ap->port_task, delay); - - /* rc == 0 means that another user is using port task */ - WARN_ON(rc == 0); -} - -/** - * ata_port_flush_task - Flush port_task - * @ap: The ata_port to flush port_task for - * - * After this function completes, port_task is guranteed not to - * be running or scheduled. - * - * LOCKING: - * Kernel thread context (may sleep) - */ -void ata_port_flush_task(struct ata_port *ap) -{ - unsigned long flags; - - DPRINTK("ENTER\n"); - - spin_lock_irqsave(ap->lock, flags); - ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK; - spin_unlock_irqrestore(ap->lock, flags); - - DPRINTK("flush #1\n"); - flush_workqueue(ata_wq); - - /* - * At this point, if a task is running, it's guaranteed to see - * the FLUSH flag; thus, it will never queue pio tasks again. - * Cancel and flush. - */ - if (!cancel_delayed_work(&ap->port_task)) { - if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", - __FUNCTION__); - flush_workqueue(ata_wq); - } - - spin_lock_irqsave(ap->lock, flags); - ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK; - spin_unlock_irqrestore(ap->lock, flags); - - if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); -} - -void ata_qc_complete_internal(struct ata_queued_cmd *qc) +static void ata_qc_complete_internal(struct ata_queued_cmd *qc) { struct completion *waiting = qc->private_data; @@ -995,13 +1520,14 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc) } /** - * ata_exec_internal - execute libata internal command + * ata_exec_internal_sg - execute libata internal command * @dev: Device to which the command is sent * @tf: Taskfile registers for the command and the result * @cdb: CDB for packet command - * @dma_dir: Data tranfer direction of the command - * @buf: Data buffer of the command - * @buflen: Length of data buffer + * @dma_dir: Data transfer direction of the command + * @sgl: sg list for the data buffer of the command + * @n_elem: Number of sg entries + * @timeout: Timeout in msecs (0 for default) * * Executes libata internal command with timeout. @tf contains * command on entry and result on return. 
Timeout and error @@ -1015,15 +1541,19 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc) * RETURNS: * Zero on success, AC_ERR_* mask on failure */ -unsigned ata_exec_internal(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, void *buf, unsigned int buflen) +unsigned ata_exec_internal_sg(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, struct scatterlist *sgl, + unsigned int n_elem, unsigned long timeout) { - struct ata_port *ap = dev->ap; + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; u8 command = tf->command; + int auto_timeout = 0; struct ata_queued_cmd *qc; unsigned int tag, preempted_tag; u32 preempted_sactive, preempted_qc_active; + int preempted_nr_active_links; DECLARE_COMPLETION_ONSTACK(wait); unsigned long flags; unsigned int err_mask; @@ -1059,22 +1589,36 @@ unsigned ata_exec_internal(struct ata_device *dev, qc->dev = dev; ata_qc_reinit(qc); - preempted_tag = ap->active_tag; - preempted_sactive = ap->sactive; + preempted_tag = link->active_tag; + preempted_sactive = link->sactive; preempted_qc_active = ap->qc_active; - ap->active_tag = ATA_TAG_POISON; - ap->sactive = 0; + preempted_nr_active_links = ap->nr_active_links; + link->active_tag = ATA_TAG_POISON; + link->sactive = 0; ap->qc_active = 0; + ap->nr_active_links = 0; /* prepare & issue qc */ qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + + /* some SATA bridges need us to indicate data xfer direction */ + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && + dma_dir == DMA_FROM_DEVICE) + qc->tf.feature |= ATAPI_DMADIR; + qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { - ata_sg_init_one(qc, buf, buflen); - qc->nsect = buflen / ATA_SECT_SIZE; + unsigned int i, buflen = 0; + struct scatterlist *sg; + + for_each_sg(sgl, sg, n_elem, i) + buflen += sg->length; + + ata_sg_init(qc, sgl, n_elem); + qc->nbytes = buflen; } qc->private_data = &wait; @@ -1084,9 +1628,24 @@ unsigned ata_exec_internal(struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); - rc = wait_for_completion_timeout(&wait, ata_probe_timeout); + if (!timeout) { + if (ata_probe_timeout) + timeout = ata_probe_timeout * 1000; + else { + timeout = ata_internal_cmd_timeout(dev, command); + auto_timeout = 1; + } + } - ata_port_flush_task(ap); + if (ap->ops->error_handler) + ata_eh_release(ap); + + rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); + + if (ap->ops->error_handler) + ata_eh_acquire(ap); + + ata_sff_flush_pio_task(ap); if (!rc) { spin_lock_irqsave(ap->lock, flags); @@ -1105,8 +1664,8 @@ unsigned ata_exec_internal(struct ata_device *dev, ata_qc_complete(qc); if (ata_msg_warn(ap)) - ata_dev_printk(dev, KERN_WARNING, - "qc timeout (cmd 0x%x)\n", command); + ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", + command); } spin_unlock_irqrestore(ap->lock, flags); @@ -1116,12 +1675,16 @@ unsigned ata_exec_internal(struct ata_device *dev, if (ap->ops->post_internal_cmd) ap->ops->post_internal_cmd(qc); - if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) { - if (ata_msg_warn(ap)) - ata_dev_printk(dev, KERN_WARNING, - "zero err_mask for failed " - "internal command, assuming AC_ERR_OTHER\n"); - qc->err_mask |= AC_ERR_OTHER; + /* perform minimal error analysis */ + if (qc->flags & ATA_QCFLAG_FAILED) { + if (qc->result_tf.command & (ATA_ERR | ATA_DF)) + qc->err_mask |= AC_ERR_DEV; + + if (!qc->err_mask) + qc->err_mask |= AC_ERR_OTHER; + + if (qc->err_mask & ~AC_ERR_OTHER) + 
qc->err_mask &= ~AC_ERR_OTHER; } /* finish up */ @@ -1131,32 +1694,58 @@ unsigned ata_exec_internal(struct ata_device *dev, err_mask = qc->err_mask; ata_qc_free(qc); - ap->active_tag = preempted_tag; - ap->sactive = preempted_sactive; + link->active_tag = preempted_tag; + link->sactive = preempted_sactive; ap->qc_active = preempted_qc_active; - - /* XXX - Some LLDDs (sata_mv) disable port on command failure. - * Until those drivers are fixed, we detect the condition - * here, fail the command with AC_ERR_SYSTEM and reenable the - * port. - * - * Note that this doesn't change any behavior as internal - * command failure results in disabling the device in the - * higher layer for LLDDs without new reset/EH callbacks. - * - * Kill the following code as soon as those drivers are fixed. - */ - if (ap->flags & ATA_FLAG_DISABLED) { - err_mask |= AC_ERR_SYSTEM; - ata_port_probe(ap); - } + ap->nr_active_links = preempted_nr_active_links; spin_unlock_irqrestore(ap->lock, flags); + if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) + ata_internal_cmd_timed_out(dev, command); + return err_mask; } /** + * ata_exec_internal - execute libata internal command + * @dev: Device to which the command is sent + * @tf: Taskfile registers for the command and the result + * @cdb: CDB for packet command + * @dma_dir: Data transfer direction of the command + * @buf: Data buffer of the command + * @buflen: Length of data buffer + * @timeout: Timeout in msecs (0 for default) + * + * Wrapper around ata_exec_internal_sg() which takes simple + * buffer instead of sg list. + * + * LOCKING: + * None. Should be called with kernel context, might sleep. + * + * RETURNS: + * Zero on success, AC_ERR_* mask on failure + */ +unsigned ata_exec_internal(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, void *buf, unsigned int buflen, + unsigned long timeout) +{ + struct scatterlist *psg = NULL, sg; + unsigned int n_elem = 0; + + if (dma_dir != DMA_NONE) { + WARN_ON(!buf); + sg_init_one(&sg, buf, buflen); + psg = &sg; + n_elem++; + } + + return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, + timeout); +} + +/** * ata_do_simple_cmd - execute simple internal command * @dev: Device to which the command is sent * @cmd: Opcode to execute @@ -1180,7 +1769,7 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) tf.flags |= ATA_TFLAG_DEVICE; tf.protocol = ATA_PROT_NODATA; - return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); + return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); } /** @@ -1190,37 +1779,77 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) * Check if the current speed of the device requires IORDY. Used * by various controllers for chip configuration. */ - unsigned int ata_pio_need_iordy(const struct ata_device *adev) { - int pio; - int speed = adev->pio_mode - XFER_PIO_0; - - if (speed < 2) + /* Don't set IORDY if we're preparing for reset. IORDY may + * lead to controller lock up on certain controllers if the + * port is not occupied. See bko#11703 for details. + */ + if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) + return 0; + /* Controller doesn't support IORDY. Probably a pointless + * check as the caller should know this. + */ + if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) return 0; - if (speed > 2) + /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. 
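+	   (This check must come before the generic PIO3-and-higher rule
+	   below, which would otherwise force IORDY on for a CFA card
+	   running PIO5/PIO6.)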
 */
+	if (ata_id_is_cfa(adev->id)
+	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
+		return 0;
+	/* For PIO3 and higher, IORDY is mandatory */
+	if (adev->pio_mode > XFER_PIO_2)
+		return 1;
+	/* We turn it on when possible */
+	if (ata_id_has_iordy(adev->id))
+		return 1;
+	return 0;
+}
+/**
+ *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
+ *	@adev: ATA device
+ *
+ *	Compute the mask of PIO modes usable without IORDY.  The
+ *	result never exceeds PIO2, as PIO3 and higher require IORDY.
+ */
+static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
+{
 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
-	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
-		pio = adev->id[ATA_ID_EIDE_PIO];
+	u16 pio = adev->id[ATA_ID_EIDE_PIO];
 	/* Is the speed faster than the drive allows non IORDY ? */
 	if (pio) {
 		/* This is cycle times not frequency - watch the logic! */
 		if (pio > 240)	/* PIO2 is 240nS per cycle */
-			return 1;
-		return 0;
+			return 3 << ATA_SHIFT_PIO;
+		return 7 << ATA_SHIFT_PIO;
 	}
 	}
-	return 0;
+	return 3 << ATA_SHIFT_PIO;
+}
+
+/**
+ *	ata_do_dev_read_id		-	default ID read method
+ *	@dev: device
+ *	@tf: proposed taskfile
+ *	@id: data buffer
+ *
+ *	Issue the identify taskfile and hand back the buffer containing
+ *	identify data.  For some RAID controllers and for pre-ATA devices
+ *	this function is wrapped or replaced by the driver.
+ */
+unsigned int ata_do_dev_read_id(struct ata_device *dev,
+				struct ata_taskfile *tf, u16 *id)
+{
+	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
+				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
+}
 
 /**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
- *	@post_reset: is this read ID post-reset?
+ *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
@@ -1228,6 +1857,9 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
+ *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
+ *	now we abort if we hit that case.
+ *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
@@ -1235,25 +1867,26 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 *	0 on success, -errno otherwise.
 */
 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
-		    int post_reset, u16 *id)
+		    unsigned int flags, u16 *id)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
 	const char *reason;
+	bool is_semb = class == ATA_DEV_SEMB;
+	int may_fallback = 1, tried_spinup = 0;
 	int rc;
 
 	if (ata_msg_ctl(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
-			       __FUNCTION__, ap->id, dev->devno);
-
-	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
+		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
 
- retry:
+retry:
 	ata_tf_init(dev, &tf);
 
 	switch (class) {
+	case ATA_DEV_SEMB:
+		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
 	case ATA_DEV_ATA:
 		tf.command = ATA_CMD_ID_ATA;
 		break;
@@ -1268,36 +1901,130 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 
 	tf.protocol = ATA_PROT_PIO;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
-				     id, sizeof(id[0]) * ATA_ID_WORDS);
+	/* Some devices choke if TF registers contain garbage.  Make
+	 * sure those are properly initialized.
+ */ + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + + /* Device presence detection is unreliable on some + * controllers. Always poll IDENTIFY if available. + */ + tf.flags |= ATA_TFLAG_POLLING; + + if (ap->ops->read_id) + err_mask = ap->ops->read_id(dev, &tf, id); + else + err_mask = ata_do_dev_read_id(dev, &tf, id); + if (err_mask) { + if (err_mask & AC_ERR_NODEV_HINT) { + ata_dev_dbg(dev, "NODEV after polling detection\n"); + return -ENOENT; + } + + if (is_semb) { + ata_dev_info(dev, + "IDENTIFY failed on device w/ SEMB sig, disabled\n"); + /* SEMB is not supported yet */ + *p_class = ATA_DEV_SEMB_UNSUP; + return 0; + } + + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { + /* Device or controller might have reported + * the wrong device class. Give a shot at the + * other IDENTIFY if the current one is + * aborted by the device. + */ + if (may_fallback) { + may_fallback = 0; + + if (class == ATA_DEV_ATA) + class = ATA_DEV_ATAPI; + else + class = ATA_DEV_ATA; + goto retry; + } + + /* Control reaches here iff the device aborted + * both flavors of IDENTIFYs which happens + * sometimes with phantom devices. + */ + ata_dev_dbg(dev, + "both IDENTIFYs aborted, assuming NODEV\n"); + return -ENOENT; + } + rc = -EIO; reason = "I/O error"; goto err_out; } + if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + ata_dev_dbg(dev, "dumping IDENTIFY data, " + "class=%d may_fallback=%d tried_spinup=%d\n", + class, may_fallback, tried_spinup); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); + } + + /* Falling back doesn't make sense if ID data was read + * successfully at least once. + */ + may_fallback = 0; + swap_buf_le16(id, ATA_ID_WORDS); /* sanity check */ rc = -EINVAL; - reason = "device reports illegal type"; + reason = "device reports invalid type"; if (class == ATA_DEV_ATA) { if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) goto err_out; + if (ap->host->flags & ATA_HOST_IGNORE_ATA && + ata_id_is_ata(id)) { + ata_dev_dbg(dev, + "host indicates ignore ATA devices, ignored\n"); + return -ENOENT; + } } else { if (ata_id_is_ata(id)) goto err_out; } - if (post_reset && class == ATA_DEV_ATA) { + if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { + tried_spinup = 1; + /* + * Drive powered-up in standby mode, and requires a specific + * SET_FEATURES spin-up subcommand before it will accept + * anything other than the original IDENTIFY command. + */ + err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); + if (err_mask && id[2] != 0x738c) { + rc = -EIO; + reason = "SPINUP failed"; + goto err_out; + } + /* + * If the drive initially returned incomplete IDENTIFY info, + * we now must reissue the IDENTIFY command. + */ + if (id[2] == 0x37c8) + goto retry; + } + + if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { /* * The exact sequence expected by certain pre-ATA4 drives is: * SRST RESET - * IDENTIFY - * INITIALIZE DEVICE PARAMETERS + * IDENTIFY (optional in early ATA) + * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) * anything else.. * Some drives were very specific about that exact sequence. + * + * Note that ATA4 says lba is mandatory so the second check + * should never trigger. */ if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { err_mask = ata_dev_init_params(dev, id[3], id[6]); @@ -1310,7 +2037,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, /* current CHS translation info (id[53-58]) might be * changed. reread the identify device info. 
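+	 * (ata_dev_init_params() above programs the drive's default
+	 * geometry, taken from IDENTIFY words 3 and 6, which is why
+	 * words 53-58 may now differ.)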
*/ - post_reset = 0; + flags &= ~ATA_READID_POSTRESET; goto retry; } } @@ -1321,56 +2048,126 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, err_out: if (ata_msg_warn(ap)) - ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " - "(%s, err_mask=0x%x)\n", reason, err_mask); + ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", + reason, err_mask); return rc; } +static int ata_do_link_spd_horkage(struct ata_device *dev) +{ + struct ata_link *plink = ata_dev_phys_link(dev); + u32 target, target_limit; + + if (!sata_scr_valid(plink)) + return 0; + + if (dev->horkage & ATA_HORKAGE_1_5_GBPS) + target = 1; + else + return 0; + + target_limit = (1 << target) - 1; + + /* if already on stricter limit, no need to push further */ + if (plink->sata_spd_limit <= target_limit) + return 0; + + plink->sata_spd_limit = target_limit; + + /* Request another EH round by returning -EAGAIN if link is + * going faster than the target speed. Forward progress is + * guaranteed by setting sata_spd_limit to target_limit above. + */ + if (plink->sata_spd > target) { + ata_dev_info(dev, "applying link speed limit horkage to %s\n", + sata_spd_string(target)); + return -EAGAIN; + } + return 0; +} + static inline u8 ata_dev_knobble(struct ata_device *dev) { - return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); + struct ata_port *ap = dev->link->ap; + + if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) + return 0; + + return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); } -static void ata_dev_config_ncq(struct ata_device *dev, +static int ata_dev_config_ncq(struct ata_device *dev, char *desc, size_t desc_sz) { - struct ata_port *ap = dev->ap; + struct ata_port *ap = dev->link->ap; int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); + unsigned int err_mask; + char *aa_desc = ""; if (!ata_id_has_ncq(dev->id)) { desc[0] = '\0'; - return; + return 0; + } + if (dev->horkage & ATA_HORKAGE_NONCQ) { + snprintf(desc, desc_sz, "NCQ (not used)"); + return 0; } - if (ap->flags & ATA_FLAG_NCQ) { hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); dev->flags |= ATA_DFLAG_NCQ; } + if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && + (ap->flags & ATA_FLAG_FPDMA_AA) && + ata_id_has_fpdma_aa(dev->id)) { + err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, + SATA_FPDMA_AA); + if (err_mask) { + ata_dev_err(dev, + "failed to enable AA (error_mask=0x%x)\n", + err_mask); + if (err_mask != AC_ERR_DEV) { + dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; + return -EIO; + } + } else + aa_desc = ", AA"; + } + if (hdepth >= ddepth) - snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); + snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); else - snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); -} - -static void ata_set_port_max_cmd_len(struct ata_port *ap) -{ - int i; - - if (ap->scsi_host) { - unsigned int len = 0; + snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, + ddepth, aa_desc); + + if ((ap->flags & ATA_FLAG_FPDMA_AUX) && + ata_id_has_ncq_send_and_recv(dev->id)) { + err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_dbg(dev, + "failed to get NCQ Send/Recv Log Emask 0x%x\n", + err_mask); + } else { + u8 *cmds = dev->ncq_send_recv_cmds; - for (i = 0; i < ATA_MAX_DEVICES; i++) - len = max(len, ap->device[i].cdb_len); + dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); - ap->scsi_host->max_cmd_len = len; + if (dev->horkage & 
ATA_HORKAGE_NO_NCQ_TRIM) { + ata_dev_dbg(dev, "disabling queued TRIM support\n"); + cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= + ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; + } + } } + + return 0; } /** * ata_dev_configure - Configure the specified ATA/ATAPI device * @dev: Target device to configure - * @print_info: Enable device info printout * * Configure @dev according to @dev->id. Generic and low-level * driver specific fixups are also applied. @@ -1381,33 +2178,78 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap) * RETURNS: * 0 on success, -errno otherwise */ -int ata_dev_configure(struct ata_device *dev, int print_info) +int ata_dev_configure(struct ata_device *dev) { - struct ata_port *ap = dev->ap; + struct ata_port *ap = dev->link->ap; + struct ata_eh_context *ehc = &dev->link->eh_context; + int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; const u16 *id = dev->id; - unsigned int xfer_mask; + unsigned long xfer_mask; + unsigned int err_mask; char revbuf[7]; /* XYZ-99\0 */ + char fwrevbuf[ATA_ID_FW_REV_LEN+1]; + char modelbuf[ATA_ID_PROD_LEN+1]; int rc; if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { - ata_dev_printk(dev, KERN_INFO, - "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", - __FUNCTION__, ap->id, dev->devno); + ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); return 0; } if (ata_msg_probe(ap)) - ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", - __FUNCTION__, ap->id, dev->devno); + ata_dev_dbg(dev, "%s: ENTER\n", __func__); + + /* set horkage */ + dev->horkage |= ata_dev_blacklisted(dev); + ata_force_horkage(dev); + + if (dev->horkage & ATA_HORKAGE_DISABLE) { + ata_dev_info(dev, "unsupported device, disabling\n"); + ata_dev_disable(dev); + return 0; + } + + if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && + dev->class == ATA_DEV_ATAPI) { + ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", + atapi_enabled ? 
"not supported with this driver" + : "disabled"); + ata_dev_disable(dev); + return 0; + } + + rc = ata_do_link_spd_horkage(dev); + if (rc) + return rc; + + /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ + if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && + (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) + dev->horkage |= ATA_HORKAGE_NOLPM; + + if (dev->horkage & ATA_HORKAGE_NOLPM) { + ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); + dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; + } + + /* let ACPI work its magic */ + rc = ata_acpi_on_devcfg(dev); + if (rc) + return rc; + + /* massage HPA, do it early as it might change IDENTIFY data */ + rc = ata_hpa_resize(dev); + if (rc) + return rc; /* print device capabilities */ if (ata_msg_probe(ap)) - ata_dev_printk(dev, KERN_DEBUG, - "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " - "85:%04x 86:%04x 87:%04x 88:%04x\n", - __FUNCTION__, - id[49], id[82], id[83], id[84], - id[85], id[86], id[87], id[88]); + ata_dev_dbg(dev, + "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " + "85:%04x 86:%04x 87:%04x 88:%04x\n", + __func__, + id[49], id[82], id[83], id[84], + id[85], id[86], id[87], id[88]); /* initialize to-be-configured parameters */ dev->flags &= ~ATA_DFLAG_CFG_MASK; @@ -1417,6 +2259,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info) dev->cylinders = 0; dev->heads = 0; dev->sectors = 0; + dev->multi_count = 0; /* * common ATA, ATAPI feature tests @@ -1428,41 +2271,71 @@ int ata_dev_configure(struct ata_device *dev, int print_info) if (ata_msg_probe(ap)) ata_dump_id(id); + /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ + ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, + sizeof(fwrevbuf)); + + ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, + sizeof(modelbuf)); + /* ATA-specific feature tests */ if (dev->class == ATA_DEV_ATA) { if (ata_id_is_cfa(id)) { - if (id[162] & 1) /* CPRM may make this media unusable */ - ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n", - ap->id, dev->devno); + /* CPRM may make this media unusable */ + if (id[ATA_ID_CFA_KEY_MGMT] & 1) + ata_dev_warn(dev, + "supports DRM functions and may not be fully accessible\n"); snprintf(revbuf, 7, "CFA"); + } else { + snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); + /* Warn the user if the device has TPM extensions */ + if (ata_id_has_tpm(id)) + ata_dev_warn(dev, + "supports DRM functions and may not be fully accessible\n"); } - else - snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); dev->n_sectors = ata_id_n_sectors(id); + /* get current R/W Multiple count setting */ + if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { + unsigned int max = dev->id[47] & 0xff; + unsigned int cnt = dev->id[59] & 0xff; + /* only recognize/allow powers of two here */ + if (is_power_of_2(max) && is_power_of_2(cnt)) + if (cnt <= max) + dev->multi_count = cnt; + } + if (ata_id_has_lba(id)) { const char *lba_desc; - char ncq_desc[20]; + char ncq_desc[24]; lba_desc = "LBA"; dev->flags |= ATA_DFLAG_LBA; if (ata_id_has_lba48(id)) { dev->flags |= ATA_DFLAG_LBA48; lba_desc = "LBA48"; + + if (dev->n_sectors >= (1UL << 28) && + ata_id_has_flush_ext(id)) + dev->flags |= ATA_DFLAG_FLUSH_EXT; } /* config NCQ */ - ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); + rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); + if (rc) + return rc; /* print device info to dmesg */ - if (ata_msg_drv(ap) && print_info) - ata_dev_printk(dev, KERN_INFO, "%s, " - "max %s, 
%Lu sectors: %s %s\n", - revbuf, - ata_mode_string(xfer_mask), + if (ata_msg_drv(ap) && print_info) { + ata_dev_info(dev, "%s: %s, %s, max %s\n", + revbuf, modelbuf, fwrevbuf, + ata_mode_string(xfer_mask)); + ata_dev_info(dev, + "%llu sectors, multi %u: %s %s\n", (unsigned long long)dev->n_sectors, - lba_desc, ncq_desc); + dev->multi_count, lba_desc, ncq_desc); + } } else { /* CHS */ @@ -1479,22 +2352,40 @@ int ata_dev_configure(struct ata_device *dev, int print_info) } /* print device info to dmesg */ - if (ata_msg_drv(ap) && print_info) - ata_dev_printk(dev, KERN_INFO, "%s, " - "max %s, %Lu sectors: CHS %u/%u/%u\n", - revbuf, - ata_mode_string(xfer_mask), - (unsigned long long)dev->n_sectors, - dev->cylinders, dev->heads, - dev->sectors); + if (ata_msg_drv(ap) && print_info) { + ata_dev_info(dev, "%s: %s, %s, max %s\n", + revbuf, modelbuf, fwrevbuf, + ata_mode_string(xfer_mask)); + ata_dev_info(dev, + "%llu sectors, multi %u, CHS %u/%u/%u\n", + (unsigned long long)dev->n_sectors, + dev->multi_count, dev->cylinders, + dev->heads, dev->sectors); + } } - if (dev->id[59] & 0x100) { - dev->multi_count = dev->id[59] & 0xff; - if (ata_msg_drv(ap) && print_info) - ata_dev_printk(dev, KERN_INFO, - "ata%u: dev %u multi count %u\n", - ap->id, dev->devno, dev->multi_count); + /* Check and mark DevSlp capability. Get DevSlp timing variables + * from SATA Settings page of Identify Device Data Log. + */ + if (ata_id_has_devslp(dev->id)) { + u8 *sata_setting = ap->sector_buf; + int i, j; + + dev->flags |= ATA_DFLAG_DEVSLP; + err_mask = ata_read_log_page(dev, + ATA_LOG_SATA_ID_DEV_DATA, + ATA_LOG_SATA_SETTINGS, + sata_setting, + 1); + if (err_mask) + ata_dev_dbg(dev, + "failed to get Identify Device Data, Emask 0x%x\n", + err_mask); + else + for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { + j = ATA_LOG_DEVSLP_OFFSET + i; + dev->devslp_timing[i] = sata_setting[j]; + } } dev->cdb_len = 16; @@ -1502,69 +2393,188 @@ int ata_dev_configure(struct ata_device *dev, int print_info) /* ATAPI-specific feature tests */ else if (dev->class == ATA_DEV_ATAPI) { - char *cdb_intr_string = ""; + const char *cdb_intr_string = ""; + const char *atapi_an_string = ""; + const char *dma_dir_string = ""; + u32 sntf; rc = atapi_cdb_len(id); if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { if (ata_msg_warn(ap)) - ata_dev_printk(dev, KERN_WARNING, - "unsupported CDB len\n"); + ata_dev_warn(dev, "unsupported CDB len\n"); rc = -EINVAL; goto err_out_nosup; } dev->cdb_len = (unsigned int) rc; + /* Enable ATAPI AN if both the host and device have + * the support. If PMP is attached, SNTF is required + * to enable ATAPI AN to discern between PHY status + * changed notifications and ATAPI ANs. 
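+	 * (In the test below, the sata_scr_read() of SCR_NOTIFICATION
+	 * doubles as the check that the host actually implements SNTF.)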
+ */ + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + (!sata_pmp_attached(ap) || + sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { + /* issue SET feature command to turn this on */ + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, SATA_AN); + if (err_mask) + ata_dev_err(dev, + "failed to enable ATAPI AN (err_mask=0x%x)\n", + err_mask); + else { + dev->flags |= ATA_DFLAG_AN; + atapi_an_string = ", ATAPI AN"; + } + } + if (ata_id_cdb_intr(dev->id)) { dev->flags |= ATA_DFLAG_CDB_INTR; cdb_intr_string = ", CDB intr"; } + if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { + dev->flags |= ATA_DFLAG_DMADIR; + dma_dir_string = ", DMADIR"; + } + + if (ata_id_has_da(dev->id)) { + dev->flags |= ATA_DFLAG_DA; + zpodd_init(dev); + } + /* print device info to dmesg */ if (ata_msg_drv(ap) && print_info) - ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", - ata_mode_string(xfer_mask), - cdb_intr_string); + ata_dev_info(dev, + "ATAPI: %s, %s, max %s%s%s%s\n", + modelbuf, fwrevbuf, + ata_mode_string(xfer_mask), + cdb_intr_string, atapi_an_string, + dma_dir_string); } + /* determine max_sectors */ + dev->max_sectors = ATA_MAX_SECTORS; + if (dev->flags & ATA_DFLAG_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + + /* Limit PATA drive on SATA cable bridge transfers to udma5, + 200 sectors */ + if (ata_dev_knobble(dev)) { + if (ata_msg_drv(ap) && print_info) + ata_dev_info(dev, "applying bridge limits\n"); + dev->udma_mask &= ATA_UDMA5; + dev->max_sectors = ATA_MAX_SECTORS; + } + + if ((dev->class == ATA_DEV_ATAPI) && + (atapi_command_packet_set(id) == TYPE_TAPE)) { + dev->max_sectors = ATA_MAX_SECTORS_TAPE; + dev->horkage |= ATA_HORKAGE_STUCK_ERR; + } + + if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) + dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, + dev->max_sectors); + + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + + if (ap->ops->dev_config) + ap->ops->dev_config(dev); + if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { /* Let the user know. We don't want to disallow opens for rescue purposes, or in case the vendor is just a blithering - idiot */ - if (print_info) { - ata_dev_printk(dev, KERN_WARNING, + idiot. Do this after the dev_config call as some controllers + with buggy firmware may want to avoid reporting false device + bugs */ + + if (print_info) { + ata_dev_warn(dev, "Drive reports diagnostics failure. This may indicate a drive\n"); - ata_dev_printk(dev, KERN_WARNING, + ata_dev_warn(dev, "fault or invalid emulation. 
Contact drive vendor for information.\n"); } } - ata_set_port_max_cmd_len(ap); - - /* limit bridge transfers to udma5, 200 sectors */ - if (ata_dev_knobble(dev)) { - if (ata_msg_drv(ap) && print_info) - ata_dev_printk(dev, KERN_INFO, - "applying bridge limits\n"); - dev->udma_mask &= ATA_UDMA5; - dev->max_sectors = ATA_MAX_SECTORS; + if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { + ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); + ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); } - if (ap->ops->dev_config) - ap->ops->dev_config(ap, dev); - - if (ata_msg_probe(ap)) - ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", - __FUNCTION__, ata_chk_status(ap)); return 0; err_out_nosup: if (ata_msg_probe(ap)) - ata_dev_printk(dev, KERN_DEBUG, - "%s: EXIT, err\n", __FUNCTION__); + ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); return rc; } /** + * ata_cable_40wire - return 40 wire cable type + * @ap: port + * + * Helper method for drivers which want to hardwire 40 wire cable + * detection. + */ + +int ata_cable_40wire(struct ata_port *ap) +{ + return ATA_CBL_PATA40; +} + +/** + * ata_cable_80wire - return 80 wire cable type + * @ap: port + * + * Helper method for drivers which want to hardwire 80 wire cable + * detection. + */ + +int ata_cable_80wire(struct ata_port *ap) +{ + return ATA_CBL_PATA80; +} + +/** + * ata_cable_unknown - return unknown PATA cable. + * @ap: port + * + * Helper method for drivers which have no PATA cable detection. + */ + +int ata_cable_unknown(struct ata_port *ap) +{ + return ATA_CBL_PATA_UNK; +} + +/** + * ata_cable_ignore - return ignored PATA cable. + * @ap: port + * + * Helper method for drivers which don't use cable type to limit + * transfer mode. + */ +int ata_cable_ignore(struct ata_port *ap) +{ + return ATA_CBL_PATA_IGN; +} + +/** + * ata_cable_sata - return SATA cable type + * @ap: port + * + * Helper method for drivers which have SATA cables + */ + +int ata_cable_sata(struct ata_port *ap) +{ + return ATA_CBL_SATA; +} + +/** * ata_bus_probe - Reset and probe ATA bus * @ap: Bus to probe * @@ -1583,25 +2593,38 @@ int ata_bus_probe(struct ata_port *ap) { unsigned int classes[ATA_MAX_DEVICES]; int tries[ATA_MAX_DEVICES]; - int i, rc, down_xfermask; + int rc; struct ata_device *dev; - ata_port_probe(ap); - - for (i = 0; i < ATA_MAX_DEVICES; i++) - tries[i] = ATA_PROBE_MAX_TRIES; + ata_for_each_dev(dev, &ap->link, ALL) + tries[dev->devno] = ATA_PROBE_MAX_TRIES; retry: - down_xfermask = 0; + ata_for_each_dev(dev, &ap->link, ALL) { + /* If we issue an SRST then an ATA drive (not ATAPI) + * may change configuration and be in PIO0 timing. If + * we do a hard reset (or are coming from power on) + * this is true for ATA or ATAPI. Until we've set a + * suitable controller mode we should not touch the + * bus as we may be talking too fast. + */ + dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; + + /* If the controller has a pio mode setup function + * then use it to set the chipset to rights. Don't + * touch the DMA setup as that will be dealt with when + * configuring devices. 
+ */ + if (ap->ops->set_piomode) + ap->ops->set_piomode(ap, dev); + } /* reset and determine device classes */ ap->ops->phy_reset(ap); - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; - - if (!(ap->flags & ATA_FLAG_DISABLED) && - dev->class != ATA_DEV_UNKNOWN) + ata_for_each_dev(dev, &ap->link, ALL) { + if (dev->class != ATA_DEV_UNKNOWN) classes[dev->devno] = dev->class; else classes[dev->devno] = ATA_DEV_NONE; @@ -1609,193 +2632,113 @@ int ata_bus_probe(struct ata_port *ap) dev->class = ATA_DEV_UNKNOWN; } - ata_port_probe(ap); - - /* after the reset the device state is PIO 0 and the controller - state is undefined. Record the mode */ - - for (i = 0; i < ATA_MAX_DEVICES; i++) - ap->device[i].pio_mode = XFER_PIO_0; - - /* read IDENTIFY page and configure devices */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; + /* read IDENTIFY page and configure devices. We have to do the identify + specific sequence bass-ackwards so that PDIAG- is released by + the slave device */ - if (tries[i]) - dev->class = classes[i]; + ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { + if (tries[dev->devno]) + dev->class = classes[dev->devno]; if (!ata_dev_enabled(dev)) continue; - rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); + rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, + dev->id); if (rc) goto fail; + } + + /* Now ask for the cable type as PDIAG- should have been released */ + if (ap->ops->cable_detect) + ap->cbl = ap->ops->cable_detect(ap); + + /* We may have SATA bridge glue hiding here irrespective of + * the reported cable types and sensed types. When SATA + * drives indicate we have a bridge, we don't know which end + * of the link the bridge is which is a problem. + */ + ata_for_each_dev(dev, &ap->link, ENABLED) + if (ata_id_is_sata(dev->id)) + ap->cbl = ATA_CBL_SATA; - rc = ata_dev_configure(dev, 1); + /* After the identify sequence we can now set up the devices. We do + this in the normal order so that the user doesn't get confused */ + + ata_for_each_dev(dev, &ap->link, ENABLED) { + ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; + rc = ata_dev_configure(dev); + ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; if (rc) goto fail; } /* configure transfer mode */ - rc = ata_set_mode(ap, &dev); - if (rc) { - down_xfermask = 1; + rc = ata_set_mode(&ap->link, &dev); + if (rc) goto fail; - } - for (i = 0; i < ATA_MAX_DEVICES; i++) - if (ata_dev_enabled(&ap->device[i])) - return 0; + ata_for_each_dev(dev, &ap->link, ENABLED) + return 0; - /* no device present, disable port */ - ata_port_disable(ap); - ap->ops->port_disable(ap); return -ENODEV; fail: + tries[dev->devno]--; + switch (rc) { case -EINVAL: - case -ENODEV: + /* eeek, something went very wrong, give up */ tries[dev->devno] = 0; break; + + case -ENODEV: + /* give it just one more chance */ + tries[dev->devno] = min(tries[dev->devno], 1); case -EIO: - sata_down_spd_limit(ap); - /* fall through */ - default: - tries[dev->devno]--; - if (down_xfermask && - ata_down_xfermask_limit(dev, tries[dev->devno] == 1)) - tries[dev->devno] = 0; + if (tries[dev->devno] == 1) { + /* This is the last chance, better to slow + * down than lose it. 
+ */ + sata_down_spd_limit(&ap->link, 0); + ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); + } } - if (!tries[dev->devno]) { - ata_down_xfermask_limit(dev, 1); + if (!tries[dev->devno]) ata_dev_disable(dev); - } goto retry; } /** - * ata_port_probe - Mark port as enabled - * @ap: Port for which we indicate enablement - * - * Modify @ap data structure such that the system - * thinks that the entire port is enabled. - * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_probe(struct ata_port *ap) -{ - ap->flags &= ~ATA_FLAG_DISABLED; -} - -/** * sata_print_link_status - Print SATA link status - * @ap: SATA port to printk link status about + * @link: SATA link to printk link status about * * This function prints link speed and status of a SATA link. * * LOCKING: * None. */ -static void sata_print_link_status(struct ata_port *ap) +static void sata_print_link_status(struct ata_link *link) { u32 sstatus, scontrol, tmp; - if (sata_scr_read(ap, SCR_STATUS, &sstatus)) + if (sata_scr_read(link, SCR_STATUS, &sstatus)) return; - sata_scr_read(ap, SCR_CONTROL, &scontrol); + sata_scr_read(link, SCR_CONTROL, &scontrol); - if (ata_port_online(ap)) { + if (ata_phys_link_online(link)) { tmp = (sstatus >> 4) & 0xf; - ata_port_printk(ap, KERN_INFO, - "SATA link up %s (SStatus %X SControl %X)\n", - sata_spd_string(tmp), sstatus, scontrol); + ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", + sata_spd_string(tmp), sstatus, scontrol); } else { - ata_port_printk(ap, KERN_INFO, - "SATA link down (SStatus %X SControl %X)\n", - sstatus, scontrol); + ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", + sstatus, scontrol); } } /** - * __sata_phy_reset - Wake/reset a low-level SATA PHY - * @ap: SATA port associated with target SATA PHY. - * - * This function issues commands to standard SATA Sxxx - * PHY registers, to wake up the phy (and device), and - * clear any reset condition. - * - * LOCKING: - * PCI/etc. bus probe sem. - * - */ -void __sata_phy_reset(struct ata_port *ap) -{ - u32 sstatus; - unsigned long timeout = jiffies + (HZ * 5); - - if (ap->flags & ATA_FLAG_SATA_RESET) { - /* issue phy wake/reset */ - sata_scr_write_flush(ap, SCR_CONTROL, 0x301); - /* Couldn't find anything in SATA I/II specs, but - * AHCI-1.1 10.4.2 says at least 1 ms. */ - mdelay(1); - } - /* phy wake/clear reset */ - sata_scr_write_flush(ap, SCR_CONTROL, 0x300); - - /* wait for phy to become ready, if necessary */ - do { - msleep(200); - sata_scr_read(ap, SCR_STATUS, &sstatus); - if ((sstatus & 0xf) != 1) - break; - } while (time_before(jiffies, timeout)); - - /* print link status */ - sata_print_link_status(ap); - - /* TODO: phy layer with polling, timeouts, etc. */ - if (!ata_port_offline(ap)) - ata_port_probe(ap); - else - ata_port_disable(ap); - - if (ap->flags & ATA_FLAG_DISABLED) - return; - - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { - ata_port_disable(ap); - return; - } - - ap->cbl = ATA_CBL_SATA; -} - -/** - * sata_phy_reset - Reset SATA bus. - * @ap: SATA port associated with target SATA PHY. - * - * This function resets the SATA bus, and then probes - * the bus for devices. - * - * LOCKING: - * PCI/etc. bus probe sem. 
- * - */ -void sata_phy_reset(struct ata_port *ap) -{ - __sata_phy_reset(ap); - if (ap->flags & ATA_FLAG_DISABLED) - return; - ata_bus_reset(ap); -} - -/** * ata_dev_pair - return other device on cable * @adev: device * @@ -1805,99 +2748,118 @@ void sata_phy_reset(struct ata_port *ap) struct ata_device *ata_dev_pair(struct ata_device *adev) { - struct ata_port *ap = adev->ap; - struct ata_device *pair = &ap->device[1 - adev->devno]; + struct ata_link *link = adev->link; + struct ata_device *pair = &link->device[1 - adev->devno]; if (!ata_dev_enabled(pair)) return NULL; return pair; } /** - * ata_port_disable - Disable port. - * @ap: Port to be disabled. - * - * Modify @ap data structure such that the system - * thinks that the entire port is disabled, and should - * never attempt to probe or communicate with devices - * on this port. - * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_disable(struct ata_port *ap) -{ - ap->device[0].class = ATA_DEV_NONE; - ap->device[1].class = ATA_DEV_NONE; - ap->flags |= ATA_FLAG_DISABLED; -} - -/** * sata_down_spd_limit - adjust SATA spd limit downward - * @ap: Port to adjust SATA spd limit for + * @link: Link to adjust SATA spd limit for + * @spd_limit: Additional limit * - * Adjust SATA spd limit of @ap downward. Note that this + * Adjust SATA spd limit of @link downward. Note that this * function only adjusts the limit. The change must be applied * using sata_set_spd(). * + * If @spd_limit is non-zero, the speed is limited to equal to or + * lower than @spd_limit if such speed is supported. If + * @spd_limit is slower than any supported speed, only the lowest + * supported speed is allowed. + * * LOCKING: * Inherited from caller. * * RETURNS: * 0 on success, negative errno on failure */ -int sata_down_spd_limit(struct ata_port *ap) +int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) { u32 sstatus, spd, mask; - int rc, highbit; + int rc, bit; - rc = sata_scr_read(ap, SCR_STATUS, &sstatus); - if (rc) - return rc; + if (!sata_scr_valid(link)) + return -EOPNOTSUPP; + + /* If SCR can be read, use it to determine the current SPD. + * If not, use cached value in link->sata_spd. + */ + rc = sata_scr_read(link, SCR_STATUS, &sstatus); + if (rc == 0 && ata_sstatus_online(sstatus)) + spd = (sstatus >> 4) & 0xf; + else + spd = link->sata_spd; - mask = ap->sata_spd_limit; + mask = link->sata_spd_limit; if (mask <= 1) return -EINVAL; - highbit = fls(mask) - 1; - mask &= ~(1 << highbit); - spd = (sstatus >> 4) & 0xf; - if (spd <= 1) - return -EINVAL; - spd--; - mask &= (1 << spd) - 1; + /* unconditionally mask off the highest bit */ + bit = fls(mask) - 1; + mask &= ~(1 << bit); + + /* Mask off all speeds higher than or equal to the current + * one. Force 1.5Gbps if current SPD is not available. + */ + if (spd > 1) + mask &= (1 << (spd - 1)) - 1; + else + mask &= 1; + + /* were we already at the bottom? 
*/ if (!mask) return -EINVAL; - ap->sata_spd_limit = mask; + if (spd_limit) { + if (mask & ((1 << spd_limit) - 1)) + mask &= (1 << spd_limit) - 1; + else { + bit = ffs(mask) - 1; + mask = 1 << bit; + } + } - ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n", - sata_spd_string(fls(mask))); + link->sata_spd_limit = mask; + + ata_link_warn(link, "limiting SATA link speed to %s\n", + sata_spd_string(fls(mask))); return 0; } -static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) +static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) { - u32 spd, limit; + struct ata_link *host_link = &link->ap->link; + u32 limit, target, spd; + + limit = link->sata_spd_limit; - if (ap->sata_spd_limit == UINT_MAX) - limit = 0; + /* Don't configure downstream link faster than upstream link. + * It doesn't speed up anything and some PMPs choke on such + * configuration. + */ + if (!ata_is_host_link(link) && host_link->sata_spd) + limit &= (1 << host_link->sata_spd) - 1; + + if (limit == UINT_MAX) + target = 0; else - limit = fls(ap->sata_spd_limit); + target = fls(limit); spd = (*scontrol >> 4) & 0xf; - *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); + *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); - return spd != limit; + return spd != target; } /** * sata_set_spd_needed - is SATA spd configuration needed - * @ap: Port in question + * @link: Link in question * * Test whether the spd limit in SControl matches - * @ap->sata_spd_limit. This function is used to determine + * @link->sata_spd_limit. This function is used to determine * whether hardreset is necessary to apply SATA spd * configuration. * @@ -1907,21 +2869,21 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) * RETURNS: * 1 if SATA spd configuration is needed, 0 otherwise. */ -int sata_set_spd_needed(struct ata_port *ap) +static int sata_set_spd_needed(struct ata_link *link) { u32 scontrol; - if (sata_scr_read(ap, SCR_CONTROL, &scontrol)) - return 0; + if (sata_scr_read(link, SCR_CONTROL, &scontrol)) + return 1; - return __sata_set_spd_needed(ap, &scontrol); + return __sata_set_spd_needed(link, &scontrol); } /** * sata_set_spd - set SATA spd according to spd limit - * @ap: Port to set SATA spd for + * @link: Link to set SATA spd for * - * Set SATA spd of @ap according to sata_spd_limit. + * Set SATA spd of @link according to sata_spd_limit. * * LOCKING: * Inherited from caller. @@ -1930,18 +2892,18 @@ int sata_set_spd_needed(struct ata_port *ap) * 0 if spd doesn't need to be changed, 1 if spd has been * changed. Negative errno if SCR registers are inaccessible. 
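To make the mask arithmetic in sata_down_spd_limit() above concrete, here is a small worked example in plain user-space C (a sketch, not kernel code; fls32() mimics the kernel's fls(), and the SPD mask bits are Gen1 = bit 0, Gen2 = bit 1, Gen3 = bit 2):

#include <stdio.h>

static int fls32(unsigned int x)	/* like the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mask = 0x7;	/* Gen1|Gen2|Gen3 supported */
	unsigned int spd = 3;		/* link currently runs at Gen3 */

	mask &= ~(1 << (fls32(mask) - 1));	/* drop highest bit -> 0x3 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;	/* below current -> 0x3 */
	else
		mask &= 1;			/* force 1.5Gbps */

	/* fls32(0x3) == 2, i.e. the new ceiling is Gen2 (3.0Gbps) */
	printf("new sata_spd_limit mask: 0x%x\n", mask);
	return 0;
}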
*/ -int sata_set_spd(struct ata_port *ap) +int sata_set_spd(struct ata_link *link) { u32 scontrol; int rc; - if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; - if (!__sata_set_spd_needed(ap, &scontrol)) + if (!__sata_set_spd_needed(link, &scontrol)) return 0; - if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) return rc; return 1; @@ -1960,55 +2922,51 @@ int sata_set_spd(struct ata_port *ap) */ static const struct ata_timing ata_timing[] = { - - { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, - { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, - { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, - { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, - - { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, - { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, - { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, - { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, - { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, - -/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ - - { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, - { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, - { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, - - { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, - { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, - { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, - - { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, - { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, - { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, - { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, - - { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, - { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, - { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, - -/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ +/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ + { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, + { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, + { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, + { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, + { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, + { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, + { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, + + { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, + { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, + + { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, + { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, + { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, + { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, + +/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ + { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, + { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, + { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, + { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, + { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, { 0xFF } }; -#define ENOUGH(v,unit) (((v)-1)/(unit)+1) -#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) +#define ENOUGH(v, unit) (((v)-1)/(unit)+1) +#define EZ(v, unit) ((v)?ENOUGH(v, unit):0) static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) { - q->setup = EZ(t->setup * 1000, T); - q->act8b = EZ(t->act8b * 1000, T); - q->rec8b = EZ(t->rec8b * 1000, T); - q->cyc8b = EZ(t->cyc8b * 1000, T); - q->active = EZ(t->active * 1000, T); - 
q->recover = EZ(t->recover * 1000, T); - q->cycle = EZ(t->cycle * 1000, T); - q->udma = EZ(t->udma * 1000, UT); + q->setup = EZ(t->setup * 1000, T); + q->act8b = EZ(t->act8b * 1000, T); + q->rec8b = EZ(t->rec8b * 1000, T); + q->cyc8b = EZ(t->cyc8b * 1000, T); + q->active = EZ(t->active * 1000, T); + q->recover = EZ(t->recover * 1000, T); + q->dmack_hold = EZ(t->dmack_hold * 1000, T); + q->cycle = EZ(t->cycle * 1000, T); + q->udma = EZ(t->udma * 1000, UT); } void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, @@ -2020,23 +2978,31 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); + if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); } -static const struct ata_timing* ata_timing_find_mode(unsigned short speed) +const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) { - const struct ata_timing *t; + const struct ata_timing *t = ata_timing; - for (t = ata_timing; t->mode != speed; t++) - if (t->mode == 0xFF) - return NULL; - return t; + while (xfer_mode > t->mode) + t++; + + if (xfer_mode == t->mode) + return t; + + WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", + __func__, xfer_mode); + + return NULL; } int ata_timing_compute(struct ata_device *adev, unsigned short speed, struct ata_timing *t, int T, int UT) { + const u16 *id = adev->id; const struct ata_timing *s; struct ata_timing p; @@ -2054,14 +3020,18 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, * PIO/MW_DMA cycle timing. */ - if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ + if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); - if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { - if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; - else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; - } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { - p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; - } + + if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; + else if ((speed <= XFER_PIO_4) || + (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + p.cycle = id[ATA_ID_EIDE_DMA_MIN]; + ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); } @@ -2077,7 +3047,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, * DMA cycle timing is slower/equal than the fastest PIO timing. 
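The quantisation above is a ceiling division from the timing table's nanosecond values into controller clock periods; callers typically derive T and UT as 10^9 / (clock in kHz), i.e. picoseconds, which is why each value is multiplied by 1000 first. A worked example using the same macros (values chosen for illustration):

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)	/* ceil(v / unit) */
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)	/* but keep 0 as 0 */

/* MW_DMA_2 has a 120 ns cycle; on a 33 MHz controller clock
 * (T = 30000 ps) that quantises to ceil(120000 / 30000) = 4 clocks.
 */
int clocks = EZ(120 * 1000, 30000);	/* == 4 */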
 */
-	if (speed > XFER_PIO_4) {
+	if (speed > XFER_PIO_6) {
 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
 	}
@@ -2096,13 +3066,70 @@
 		t->recover = t->cycle - t->active;
 	}
 
+	/* In a few cases quantisation may produce enough errors to
+	   leave t->cycle too low for the sum of active and recovery
+	   if so we must correct this */
+	if (t->active + t->recover > t->cycle)
+		t->cycle = t->active + t->recover;
+
 	return 0;
 }
 
 /**
+ * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
+ * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
+ * @cycle: cycle duration in ns
+ *
+ * Return matching xfer mode for @cycle.  The returned mode is of
+ * the transfer type specified by @xfer_shift.  If @cycle is too
+ * slow for @xfer_shift, 0xff is returned.  If @cycle is faster
+ * than the fastest known mode, the fastest mode is returned.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_mode, 0xff if no match found.
+ */
+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
+{
+	u8 base_mode = 0xff, last_mode = 0xff;
+	const struct ata_xfer_ent *ent;
+	const struct ata_timing *t;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (ent->shift == xfer_shift)
+			base_mode = ent->base;
+
+	for (t = ata_timing_find_mode(base_mode);
+	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
+		unsigned short this_cycle;
+
+		switch (xfer_shift) {
+		case ATA_SHIFT_PIO:
+		case ATA_SHIFT_MWDMA:
+			this_cycle = t->cycle;
+			break;
+		case ATA_SHIFT_UDMA:
+			this_cycle = t->udma;
+			break;
+		default:
+			return 0xff;
+		}
+
+		if (cycle > this_cycle)
+			break;
+
+		last_mode = t->mode;
+	}
+
+	return last_mode;
+}
+
+/**
 * ata_down_xfermask_limit - adjust dev xfer masks downward
 * @dev: Device to adjust xfer masks
- * @force_pio0: Force PIO0
+ * @sel: ATA_DNXFER_* selector
 *
 * Adjust xfer masks of @dev downward.  Note that this function
 * does not apply the change.
Invoking ata_set_mode() afterwards @@ -2114,74 +3141,168 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, * RETURNS: * 0 on success, negative errno on failure */ -int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0) +int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) { - unsigned long xfer_mask; - int highbit; + char buf[32]; + unsigned long orig_mask, xfer_mask; + unsigned long pio_mask, mwdma_mask, udma_mask; + int quiet, highbit; - xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, - dev->udma_mask); + quiet = !!(sel & ATA_DNXFER_QUIET); + sel &= ~ATA_DNXFER_QUIET; - if (!xfer_mask) - goto fail; - /* don't gear down to MWDMA from UDMA, go directly to PIO */ - if (xfer_mask & ATA_MASK_UDMA) - xfer_mask &= ~ATA_MASK_MWDMA; + xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, + dev->mwdma_mask, + dev->udma_mask); + ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); - highbit = fls(xfer_mask) - 1; - xfer_mask &= ~(1 << highbit); - if (force_pio0) - xfer_mask &= 1 << ATA_SHIFT_PIO; - if (!xfer_mask) - goto fail; + switch (sel) { + case ATA_DNXFER_PIO: + highbit = fls(pio_mask) - 1; + pio_mask &= ~(1 << highbit); + break; + + case ATA_DNXFER_DMA: + if (udma_mask) { + highbit = fls(udma_mask) - 1; + udma_mask &= ~(1 << highbit); + if (!udma_mask) + return -ENOENT; + } else if (mwdma_mask) { + highbit = fls(mwdma_mask) - 1; + mwdma_mask &= ~(1 << highbit); + if (!mwdma_mask) + return -ENOENT; + } + break; + + case ATA_DNXFER_40C: + udma_mask &= ATA_UDMA_MASK_40C; + break; + + case ATA_DNXFER_FORCE_PIO0: + pio_mask &= 1; + case ATA_DNXFER_FORCE_PIO: + mwdma_mask = 0; + udma_mask = 0; + break; + + default: + BUG(); + } + + xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); + + if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) + return -ENOENT; + + if (!quiet) { + if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) + snprintf(buf, sizeof(buf), "%s:%s", + ata_mode_string(xfer_mask), + ata_mode_string(xfer_mask & ATA_MASK_PIO)); + else + snprintf(buf, sizeof(buf), "%s", + ata_mode_string(xfer_mask)); + + ata_dev_warn(dev, "limiting speed to %s\n", buf); + } ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, &dev->udma_mask); - ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n", - ata_mode_string(xfer_mask)); - return 0; - - fail: - return -EINVAL; } static int ata_dev_set_mode(struct ata_device *dev) { - unsigned int err_mask; + struct ata_port *ap = dev->link->ap; + struct ata_eh_context *ehc = &dev->link->eh_context; + const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; + const char *dev_err_whine = ""; + int ign_dev_err = 0; + unsigned int err_mask = 0; int rc; dev->flags &= ~ATA_DFLAG_PIO; if (dev->xfer_shift == ATA_SHIFT_PIO) dev->flags |= ATA_DFLAG_PIO; - err_mask = ata_dev_set_xfermode(dev); - if (err_mask) { - ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " - "(err_mask=0x%x)\n", err_mask); - return -EIO; + if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) + dev_err_whine = " (SET_XFERMODE skipped)"; + else { + if (nosetxfer) + ata_dev_warn(dev, + "NOSETXFER but PATA detected - can't " + "skip SETXFER, might malfunction\n"); + err_mask = ata_dev_set_xfermode(dev); } - rc = ata_dev_revalidate(dev, 0); + if (err_mask & ~AC_ERR_DEV) + goto fail; + + /* revalidate */ + ehc->i.flags |= ATA_EHI_POST_SETMODE; + rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); + ehc->i.flags &= ~ATA_EHI_POST_SETMODE; if (rc) return rc; + if 
(dev->xfer_shift == ATA_SHIFT_PIO) { + /* Old CFA may refuse this command, which is just fine */ + if (ata_id_is_cfa(dev->id)) + ign_dev_err = 1; + /* Catch several broken garbage emulations plus some pre + ATA devices */ + if (ata_id_major_version(dev->id) == 0 && + dev->pio_mode <= XFER_PIO_2) + ign_dev_err = 1; + /* Some very old devices and some bad newer ones fail + any kind of SET_XFERMODE request but support PIO0-2 + timings and no IORDY */ + if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) + ign_dev_err = 1; + } + /* Early MWDMA devices do DMA but don't allow DMA mode setting. + Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ + if (dev->xfer_shift == ATA_SHIFT_MWDMA && + dev->dma_mode == XFER_MW_DMA_0 && + (dev->id[63] >> 8) & 1) + ign_dev_err = 1; + + /* if the device is actually configured correctly, ignore dev err */ + if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) + ign_dev_err = 1; + + if (err_mask & AC_ERR_DEV) { + if (!ign_dev_err) + goto fail; + else + dev_err_whine = " (device error ignored)"; + } + DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", dev->xfer_shift, (int)dev->xfer_mode); - ata_dev_printk(dev, KERN_INFO, "configured for %s\n", - ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); + ata_dev_info(dev, "configured for %s%s\n", + ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), + dev_err_whine); + return 0; + + fail: + ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); + return -EIO; } /** - * ata_set_mode - Program timings and issue SET FEATURES - XFER - * @ap: port on which timings will be programmed - * @r_failed_dev: out paramter for failed device + * ata_do_set_mode - Program timings and issue SET FEATURES - XFER + * @link: link on which timings will be programmed + * @r_failed_dev: out parameter for failed device * - * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If - * ata_set_mode() fails, pointer to the failing device is + * Standard implementation of the function used to tune and set + * ATA device disk transfer mode (PIO3, UDMA6, etc.). If + * ata_dev_set_mode() fails, pointer to the failing device is * returned in @r_failed_dev. * * LOCKING: @@ -2190,56 +3311,49 @@ static int ata_dev_set_mode(struct ata_device *dev) * RETURNS: * 0 on success, negative errno otherwise */ -int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) + +int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { + struct ata_port *ap = link->ap; struct ata_device *dev; - int i, rc = 0, used_dma = 0, found = 0; - - /* has private set_mode? */ - if (ap->ops->set_mode) { - /* FIXME: make ->set_mode handle no device case and - * return error code and failing device on failure. 
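ata_dev_set_mode() above hinges on ata_dev_set_xfermode(), which is not shown in this hunk. For orientation, the request it issues boils down to a SET FEATURES taskfile of roughly the following shape — a sketch based on the ATA specification, not code lifted from this patch:

struct ata_taskfile tf;

ata_tf_init(dev, &tf);
tf.command  = ATA_CMD_SET_FEATURES;	/* 0xEF */
tf.feature  = SETFEATURES_XFER;		/* subcommand 0x03: set transfer mode */
tf.nsect    = dev->xfer_mode;		/* e.g. XFER_UDMA_5 == 0x45 */
tf.protocol = ATA_PROT_NODATA;
tf.flags   |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
/* ...then issued internally and the resulting err_mask inspected */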
- */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - if (ata_dev_ready(&ap->device[i])) { - ap->ops->set_mode(ap); - break; - } - } - return 0; - } + int rc = 0, used_dma = 0, found = 0; /* step 1: calculate xfer_mask */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - unsigned int pio_mask, dma_mask; + ata_for_each_dev(dev, link, ENABLED) { + unsigned long pio_mask, dma_mask; + unsigned int mode_mask; - dev = &ap->device[i]; - - if (!ata_dev_enabled(dev)) - continue; + mode_mask = ATA_DMA_MASK_ATA; + if (dev->class == ATA_DEV_ATAPI) + mode_mask = ATA_DMA_MASK_ATAPI; + else if (ata_id_is_cfa(dev->id)) + mode_mask = ATA_DMA_MASK_CFA; ata_dev_xfermask(dev); + ata_force_xfermask(dev); pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); - dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); + + if (libata_dma_mask & mode_mask) + dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, + dev->udma_mask); + else + dma_mask = 0; + dev->pio_mode = ata_xfer_mask2mode(pio_mask); dev->dma_mode = ata_xfer_mask2mode(dma_mask); found = 1; - if (dev->dma_mode) + if (ata_dma_enabled(dev)) used_dma = 1; } if (!found) goto out; /* step 2: always set host PIO timings */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; - if (!ata_dev_enabled(dev)) - continue; - - if (!dev->pio_mode) { - ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); + ata_for_each_dev(dev, link, ENABLED) { + if (dev->pio_mode == 0xff) { + ata_dev_warn(dev, "no PIO support\n"); rc = -EINVAL; goto out; } @@ -2251,10 +3365,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) } /* step 3: set host DMA timings */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; - - if (!ata_dev_enabled(dev) || !dev->dma_mode) + ata_for_each_dev(dev, link, ENABLED) { + if (!ata_dma_enabled(dev)) continue; dev->xfer_mode = dev->dma_mode; @@ -2264,13 +3376,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) } /* step 4: update devices' xfer mode */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; - - /* don't udpate suspended devices' xfer mode */ - if (!ata_dev_ready(dev)) - continue; - + ata_for_each_dev(dev, link, ENABLED) { rc = ata_dev_set_mode(dev); if (rc) goto out; @@ -2280,11 +3386,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) * host channels are not permitted to do so. */ if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) - ap->host->simplex_claimed = 1; - - /* step5: chip specific finalisation */ - if (ap->ops->post_set_mode) - ap->ops->post_set_mode(ap); + ap->host->simplex_claimed = ap; out: if (rc) @@ -2293,284 +3395,150 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) } /** - * ata_tf_to_host - issue ATA taskfile to host controller - * @ap: port to which command is being issued - * @tf: ATA taskfile register set + * ata_wait_ready - wait for link to become ready + * @link: link to be waited on + * @deadline: deadline jiffies for the operation + * @check_ready: callback to check link readiness * - * Issues ATA taskfile register set to ATA host controller, - * with proper synchronization with interrupt handler and - * other threads. + * Wait for @link to become ready. @check_ready should return + * positive number if @link is ready, 0 if it isn't, -ENODEV if + * link doesn't seem to be occupied, other errno for other error + * conditions. 
 *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-
-static inline void ata_tf_to_host(struct ata_port *ap,
-				  const struct ata_taskfile *tf)
-{
-	ap->ops->tf_load(ap, tf);
-	ap->ops->exec_command(ap, tf);
-}
-
-/**
- * ata_busy_sleep - sleep until BSY clears, or timeout
- * @ap: port containing status register to be polled
- * @tmout_pat: impatience timeout
- * @tmout: overall timeout
+ * Transient -ENODEV conditions are allowed for
+ * ATA_TMOUT_FF_WAIT.
 *
- * Sleep until ATA Status register bit BSY clears,
- * or a timeout occurs.
+ * LOCKING:
+ * EH context.
 *
- * LOCKING: None.
+ * RETURNS:
+ * 0 if @link is ready before @deadline; otherwise, -errno.
 */
-
-unsigned int ata_busy_sleep (struct ata_port *ap,
-			     unsigned long tmout_pat, unsigned long tmout)
+int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+		   int (*check_ready)(struct ata_link *link))
 {
-	unsigned long timer_start, timeout;
-	u8 status;
+	unsigned long start = jiffies;
+	unsigned long nodev_deadline;
+	int warned = 0;
 
-	status = ata_busy_wait(ap, ATA_BUSY, 300);
-	timer_start = jiffies;
-	timeout = timer_start + tmout_pat;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
-		msleep(50);
-		status = ata_busy_wait(ap, ATA_BUSY, 3);
-	}
-
-	if (status & ATA_BUSY)
-		ata_port_printk(ap, KERN_WARNING,
-				"port is slow to respond, please be patient\n");
-
-	timeout = timer_start + tmout;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
-		msleep(50);
-		status = ata_chk_status(ap);
-	}
+	/* choose which 0xff timeout to use, read comment in libata.h */
+	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+	else
+		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
 
-	if (status & ATA_BUSY) {
-		ata_port_printk(ap, KERN_ERR, "port failed to respond "
-				"(%lu secs)\n", tmout / HZ);
-		return 1;
-	}
+	/* Slave readiness can't be tested separately from master.  On
+	 * M/S emulation configuration, this function should be called
+	 * only on the master and it will handle both master and slave.
+	 */
+	WARN_ON(link == link->ap->slave_link);
 
-	return 0;
-}
+	if (time_after(nodev_deadline, deadline))
+		nodev_deadline = deadline;
 
-static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int dev0 = devmask & (1 << 0);
-	unsigned int dev1 = devmask & (1 << 1);
-	unsigned long timeout;
+	while (1) {
+		unsigned long now = jiffies;
+		int ready, tmp;
 
-	/* if device 0 was found in ata_devchk, wait for its
-	 * BSY bit to clear
-	 */
-	if (dev0)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+		ready = tmp = check_ready(link);
+		if (ready > 0)
+			return 0;
 
-	/* if device 1 was found in ata_devchk, wait for
-	 * register access, then wait for BSY to clear
-	 */
-	timeout = jiffies + ATA_TMOUT_BOOT;
-	while (dev1) {
-		u8 nsect, lbal;
-
-		ap->ops->dev_select(ap, 1);
-		if (ap->flags & ATA_FLAG_MMIO) {
-			nsect = readb((void __iomem *) ioaddr->nsect_addr);
-			lbal = readb((void __iomem *) ioaddr->lbal_addr);
-		} else {
-			nsect = inb(ioaddr->nsect_addr);
-			lbal = inb(ioaddr->lbal_addr);
-		}
-		if ((nsect == 1) && (lbal == 1))
-			break;
-		if (time_after(jiffies, timeout)) {
-			dev1 = 0;
-			break;
+		/*
+		 * -ENODEV could be transient.  Ignore -ENODEV if link
+		 * is online.  Also, some SATA devices take a long
+		 * time to clear 0xff after reset.  Wait for
+		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+		 * offline.
+		 *
+		 * Note that some PATA controllers (pata_ali) explode
+		 * if status register is read more than once when
+		 * there's no device attached.
+		 */
+		if (ready == -ENODEV) {
+			if (ata_link_online(link))
+				ready = 0;
+			else if ((link->ap->flags & ATA_FLAG_SATA) &&
+				 !ata_link_offline(link) &&
+				 time_before(now, nodev_deadline))
+				ready = 0;
 		}
-		msleep(50);	/* give drive a breather */
-	}
-	if (dev1)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
-
-	/* is all this really necessary? */
-	ap->ops->dev_select(ap, 0);
-	if (dev1)
-		ap->ops->dev_select(ap, 1);
-	if (dev0)
-		ap->ops->dev_select(ap, 0);
-}
 
-static unsigned int ata_bus_softreset(struct ata_port *ap,
-				      unsigned int devmask)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-
-	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+		if (ready)
+			return ready;
+		if (time_after(now, deadline))
+			return -EBUSY;
 
-	/* software reset.  causes dev0 to be selected */
-	if (ap->flags & ATA_FLAG_MMIO) {
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-		udelay(20);	/* FIXME: flush */
-		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
-		udelay(20);	/* FIXME: flush */
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-	} else {
-		outb(ap->ctl, ioaddr->ctl_addr);
-		udelay(10);
-		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
-		udelay(10);
-		outb(ap->ctl, ioaddr->ctl_addr);
-	}
-
-	/* spec mandates ">= 2ms" before checking status.
-	 * We wait 150ms, because that was the magic delay used for
-	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked.  Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 *
-	 * Old drivers/ide uses the 2mS rule and then waits for ready
-	 */
-	msleep(150);
+		if (!warned && time_after(now, start + 5 * HZ) &&
+		    (deadline - now > 3 * HZ)) {
+			ata_link_warn(link,
+				"link is slow to respond, please be patient "
+				"(ready=%d)\n", tmp);
+			warned = 1;
+		}
 
-	/* Before we perform post reset processing we want to see if
-	 * the bus shows 0xFF because the odd clown forgets the D7
-	 * pulldown resistor.
-	 */
-	if (ata_check_status(ap) == 0xFF) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
-		return AC_ERR_OTHER;
+		ata_msleep(link->ap, 50);
 	}
-
-	ata_bus_post_reset(ap, devmask);
-
-	return 0;
 }
 
 /**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
+ * ata_wait_after_reset - wait for link to become ready after reset
+ * @link: link to be waited on
+ * @deadline: deadline jiffies for the operation
+ * @check_ready: callback to check link readiness
 *
- * This is typically the first time we actually start issuing
- * commands to the ATA channel.  We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result.  Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register.  Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
+ * Wait for @link to become ready after reset.
 *
 * LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
+ * EH context.
 *
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
+ * RETURNS:
+ * 0 if @link is ready before @deadline; otherwise, -errno.
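The @check_ready contract described above (positive means ready, 0 means still busy, -ENODEV means the link looks unoccupied) is easiest to see with a concrete callback. A hypothetical SFF-style implementation, for illustration only:

static int example_check_ready(struct ata_link *link)
{
	u8 status = ioread8(link->ap->ioaddr.status_addr);

	if (status == 0xff)		/* floating bus, nothing attached */
		return -ENODEV;
	return !(status & ATA_BUSY);	/* 1 once BSY has cleared */
}

/* used as: rc = ata_wait_ready(link, deadline, example_check_ready); */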
 */
-
-void ata_bus_reset(struct ata_port *ap)
+int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+			 int (*check_ready)(struct ata_link *link))
 {
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	u8 err;
-	unsigned int dev0, dev1 = 0, devmask = 0;
+	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
 
-	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
-
-	/* determine if device 0/1 are present */
-	if (ap->flags & ATA_FLAG_SATA_RESET)
-		dev0 = 1;
-	else {
-		dev0 = ata_devchk(ap, 0);
-		if (slave_possible)
-			dev1 = ata_devchk(ap, 1);
-	}
-
-	if (dev0)
-		devmask |= (1 << 0);
-	if (dev1)
-		devmask |= (1 << 1);
-
-	/* select device 0 again */
-	ap->ops->dev_select(ap, 0);
-
-	/* issue bus reset */
-	if (ap->flags & ATA_FLAG_SRST)
-		if (ata_bus_softreset(ap, devmask))
-			goto err_out;
-
-	/*
-	 * determine by signature whether we have ATA or ATAPI devices
-	 */
-	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
-	if ((slave_possible) && (err != 0x81))
-		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
-
-	/* re-enable interrupts */
-	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
-		ata_irq_on(ap);
-
-	/* is double-select really necessary? */
-	if (ap->device[1].class != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 1);
-	if (ap->device[0].class != ATA_DEV_NONE)
-		ap->ops->dev_select(ap, 0);
-
-	/* if no devices were detected, disable this port */
-	if ((ap->device[0].class == ATA_DEV_NONE) &&
-	    (ap->device[1].class == ATA_DEV_NONE))
-		goto err_out;
-
-	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
-		/* set up device control for ATA_FLAG_SATA_RESET */
-		if (ap->flags & ATA_FLAG_MMIO)
-			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-		else
-			outb(ap->ctl, ioaddr->ctl_addr);
-	}
-
-	DPRINTK("EXIT\n");
-	return;
-
-err_out:
-	ata_port_printk(ap, KERN_ERR, "disabling port\n");
-	ap->ops->port_disable(ap);
-
-	DPRINTK("EXIT\n");
+	return ata_wait_ready(link, deadline, check_ready);
 }
 
 /**
- * sata_phy_debounce - debounce SATA phy status
- * @ap: ATA port to debounce SATA phy status for
+ * sata_link_debounce - debounce SATA phy status
+ * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duratinon, timeout } in msec
+ * @deadline: deadline jiffies for the operation
 *
- * Make sure SStatus of @ap reaches stable state, determined by
+ * Make sure SStatus of @link reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constraints the
- * beginning of the stable state.  Because, after hot unplugging,
- * DET gets stuck at 1 on some controllers, this functions waits
+ * beginning of the stable state.  Because DET gets stuck at 1 on
+ * some controllers after hot unplugging, this function waits
 * until timeout then returns 0 if DET is stable at 1.
 *
+ * @timeout is further limited by @deadline.  The sooner of the
+ * two is used.
+ *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
*/ -int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) +int sata_link_debounce(struct ata_link *link, const unsigned long *params, + unsigned long deadline) { - unsigned long interval_msec = params[0]; - unsigned long duration = params[1] * HZ / 1000; - unsigned long timeout = jiffies + params[2] * HZ / 1000; - unsigned long last_jiffies; + unsigned long interval = params[0]; + unsigned long duration = params[1]; + unsigned long last_jiffies, t; u32 last, cur; int rc; - if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) + t = ata_deadline(jiffies, params[2]); + if (time_before(t, deadline)) + deadline = t; + + if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; @@ -2578,16 +3546,17 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) last_jiffies = jiffies; while (1) { - msleep(interval_msec); - if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) + ata_msleep(link->ap, interval); + if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; /* DET stable? */ if (cur == last) { - if (cur == 1 && time_before(jiffies, timeout)) + if (cur == 1 && time_before(jiffies, deadline)) continue; - if (time_after(jiffies, last_jiffies + duration)) + if (time_after(jiffies, + ata_deadline(last_jiffies, duration))) return 0; continue; } @@ -2596,18 +3565,21 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) last = cur; last_jiffies = jiffies; - /* check timeout */ - if (time_after(jiffies, timeout)) - return -EBUSY; + /* Check deadline. If debouncing failed, return + * -EPIPE to tell upper layer to lower link speed. + */ + if (time_after(jiffies, deadline)) + return -EPIPE; } } /** - * sata_phy_resume - resume SATA phy - * @ap: ATA port to resume SATA phy for + * sata_link_resume - resume SATA link + * @link: ATA link to resume SATA * @params: timing parameters { interval, duratinon, timeout } in msec + * @deadline: deadline jiffies for the operation * - * Resume SATA phy of @ap and debounce it. + * Resume SATA phy @link and debounce it. * * LOCKING: * Kernel thread context (may sleep) @@ -2615,113 +3587,139 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params) * RETURNS: * 0 on success, -errno on failure. */ -int sata_phy_resume(struct ata_port *ap, const unsigned long *params) +int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline) { - u32 scontrol; + int tries = ATA_LINK_RESUME_TRIES; + u32 scontrol, serror; int rc; - if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; - scontrol = (scontrol & 0x0f0) | 0x300; - - if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) - return rc; - - /* Some PHYs react badly if SStatus is pounded immediately - * after resuming. Delay 200ms before debouncing. + /* + * Writes to SControl sometimes get ignored under certain + * controllers (ata_piix SIDPR). Make sure DET actually is + * cleared. */ - msleep(200); - - return sata_phy_debounce(ap, params); -} - -static void ata_wait_spinup(struct ata_port *ap) -{ - struct ata_eh_context *ehc = &ap->eh_context; - unsigned long end, secs; - int rc; + do { + scontrol = (scontrol & 0x0f0) | 0x300; + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + return rc; + /* + * Some PHYs react badly if SStatus is pounded + * immediately after resuming. Delay 200ms before + * debouncing. 
+		 */
+		ata_msleep(link->ap, 200);
 
-	/* first, debounce phy if SATA */
-	if (ap->cbl == ATA_CBL_SATA) {
-		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
+		/* is SControl restored correctly? */
+		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+			return rc;
+	} while ((scontrol & 0xf0f) != 0x300 && --tries);
 
-		/* if debounced successfully and offline, no need to wait */
-		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
-			return;
+	if ((scontrol & 0xf0f) != 0x300) {
+		ata_link_warn(link, "failed to resume link (SControl %X)\n",
+			      scontrol);
+		return 0;
 	}
 
-	/* okay, let's give the drive time to spin up */
-	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
-	secs = ((end - jiffies) + HZ - 1) / HZ;
+	if (tries < ATA_LINK_RESUME_TRIES)
+		ata_link_warn(link, "link resume succeeded after %d retries\n",
+			      ATA_LINK_RESUME_TRIES - tries);
 
-	if (time_after(jiffies, end))
-		return;
+	if ((rc = sata_link_debounce(link, params, deadline)))
+		return rc;
 
-	if (secs > 5)
-		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
-				"(%lu secs)\n", secs);
+	/* clear SError, some PHYs require this even for SRST to work */
+	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+		rc = sata_scr_write(link, SCR_ERROR, serror);
 
-	schedule_timeout_uninterruptible(end - jiffies);
+	return rc != -EINVAL ? rc : 0;
 }
 
 /**
- * ata_std_prereset - prepare for reset
- * @ap: ATA port to be reset
+ * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
+ * @link: ATA link to manipulate SControl for
+ * @policy: LPM policy to configure
+ * @spm_wakeup: initiate LPM transition to active state
 *
- * @ap is about to be reset.  Initialize it.
+ * Manipulate the IPM field of the SControl register of @link
+ * according to @policy.  If @policy is ATA_LPM_MAX_POWER and
+ * @spm_wakeup is %true, the SPM field is manipulated to wake up
+ * the link.  This function also clears PHYRDY_CHG before
+ * returning.
 *
 * LOCKING:
- * Kernel thread context (may sleep)
+ * EH context.
 *
 * RETURNS:
- * 0 on success, -errno otherwise.
+ * 0 on success, -errno otherwise.
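sata_link_debounce(), earlier in this hunk, implements exactly the policy its kernel-doc describes: resample DET every @interval, restart the stability window whenever the value moves, accept once it has held for @duration, and give up at the deadline. A self-contained user-space sketch of that state machine (sample_det() and now_ms() are hypothetical stand-ins for reading SStatus.DET and a monotonic millisecond clock; the kernel returns -EPIPE where this sketch returns -1):

#include <unistd.h>

extern int sample_det(void);	/* hypothetical: read SStatus.DET */
extern long now_ms(void);	/* hypothetical: monotonic clock in ms */

int debounce_det(long interval, long duration, long deadline, int *det)
{
	int last = sample_det();
	long last_change = now_ms();

	for (;;) {
		int cur;

		usleep(interval * 1000);
		cur = sample_det();

		if (cur == last) {
			/* DET stuck at 1 is tolerated until the deadline */
			if (cur == 1 && now_ms() < deadline)
				continue;
			if (now_ms() - last_change >= duration) {
				*det = cur;
				return 0;	/* stable long enough */
			}
			continue;
		}

		last = cur;			/* value moved: restart window */
		last_change = now_ms();
		if (now_ms() > deadline)
			return -1;		/* caller lowers link speed */
	}
}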
*/ -int ata_std_prereset(struct ata_port *ap) +int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup) { - struct ata_eh_context *ehc = &ap->eh_context; - const unsigned long *timing = sata_ehc_deb_timing(ehc); + struct ata_eh_context *ehc = &link->eh_context; + bool woken_up = false; + u32 scontrol; int rc; - /* handle link resume & hotplug spinup */ - if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && - (ap->flags & ATA_FLAG_HRST_TO_RESUME)) - ehc->i.action |= ATA_EH_HARDRESET; - - if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) && - (ap->flags & ATA_FLAG_SKIP_D2H_BSY)) - ata_wait_spinup(ap); - - /* if we're about to do hardreset, nothing more to do */ - if (ehc->i.action & ATA_EH_HARDRESET) - return 0; + rc = sata_scr_read(link, SCR_CONTROL, &scontrol); + if (rc) + return rc; - /* if SATA, resume phy */ - if (ap->cbl == ATA_CBL_SATA) { - rc = sata_phy_resume(ap, timing); - if (rc && rc != -EOPNOTSUPP) { - /* phy resume failed */ - ata_port_printk(ap, KERN_WARNING, "failed to resume " - "link for reset (errno=%d)\n", rc); - return rc; + switch (policy) { + case ATA_LPM_MAX_POWER: + /* disable all LPM transitions */ + scontrol |= (0x7 << 8); + /* initiate transition to active state */ + if (spm_wakeup) { + scontrol |= (0x4 << 12); + woken_up = true; + } + break; + case ATA_LPM_MED_POWER: + /* allow LPM to PARTIAL */ + scontrol &= ~(0x1 << 8); + scontrol |= (0x6 << 8); + break; + case ATA_LPM_MIN_POWER: + if (ata_link_nr_enabled(link) > 0) + /* no restrictions on LPM transitions */ + scontrol &= ~(0x7 << 8); + else { + /* empty port, power off */ + scontrol &= ~0xf; + scontrol |= (0x1 << 2); } + break; + default: + WARN_ON(1); } - /* Wait for !BSY if the controller can wait for the first D2H - * Reg FIS and we don't know that no device is attached. - */ - if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + rc = sata_scr_write(link, SCR_CONTROL, scontrol); + if (rc) + return rc; - return 0; + /* give the link time to transit out of LPM state */ + if (woken_up) + msleep(10); + + /* clear PHYRDY_CHG from SError */ + ehc->i.serror &= ~SERR_PHYRDY_CHG; + return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); } /** - * ata_std_softreset - reset host port via ATA SRST - * @ap: port to reset - * @classes: resulting classes of attached devices + * ata_std_prereset - prepare for reset + * @link: ATA link to be reset + * @deadline: deadline jiffies for the operation * - * Reset host port using ATA SRST. + * @link is about to be reset. Initialize it. Failure from + * prereset makes libata abort whole reset sequence and give up + * that port, so prereset should be best-effort. It does its + * best to prepare for reset sequence but if things go wrong, it + * should just whine, not fail. * * LOCKING: * Kernel thread context (may sleep) @@ -2729,53 +3727,51 @@ int ata_std_prereset(struct ata_port *ap) * RETURNS: * 0 on success, -errno otherwise. 
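The switch in sata_link_scr_lpm() above is pure bit surgery on SControl, whose layout (per the SATA specification) puts DET in bits 3:0, SPD in bits 7:4 and IPM in bits 11:8. A worked example of the ATA_LPM_MED_POWER case, as a sketch:

u32 scontrol = 0x00000300;	/* IPM = 0x3: Partial and Slumber blocked */

scontrol &= ~(0x1 << 8);	/* clear bit 8: allow transitions to Partial */
scontrol |= (0x6 << 8);		/* keep Slumber (and anything deeper) blocked */

/* scontrol is now 0x600; writing it back permits PARTIAL only */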
 */
-int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
+int ata_std_prereset(struct ata_link *link, unsigned long deadline)
 {
-	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	unsigned int devmask = 0, err_mask;
-	u8 err;
-
-	DPRINTK("ENTER\n");
-
-	if (ata_port_offline(ap)) {
-		classes[0] = ATA_DEV_NONE;
-		goto out;
-	}
-
-	/* determine if device 0/1 are present */
-	if (ata_devchk(ap, 0))
-		devmask |= (1 << 0);
-	if (slave_possible && ata_devchk(ap, 1))
-		devmask |= (1 << 1);
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
+	int rc;
 
-	/* select device 0 again */
-	ap->ops->dev_select(ap, 0);
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
 
-	/* issue bus reset */
-	DPRINTK("about to softreset, devmask=%x\n", devmask);
-	err_mask = ata_bus_softreset(ap, devmask);
-	if (err_mask) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
-				err_mask);
-		return -EIO;
+	/* if SATA, resume link */
+	if (ap->flags & ATA_FLAG_SATA) {
+		rc = sata_link_resume(link, timing, deadline);
+		/* whine about phy resume failure but proceed */
+		if (rc && rc != -EOPNOTSUPP)
+			ata_link_warn(link,
+				"failed to resume link for reset (errno=%d)\n",
+				rc);
 	}
 
-	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(ap, 0, &err);
-	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(ap, 1, &err);
+	/* no point in trying softreset on offline link */
+	if (ata_phys_link_offline(link))
+		ehc->i.action &= ~ATA_EH_SOFTRESET;
 
- out:
-	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
 	return 0;
 }
 
 /**
- * sata_std_hardreset - reset host port via SATA phy reset
- * @ap: port to reset
- * @class: resulting class of attached device
- *
- * SATA phy-reset host port using DET bits of SControl register.
+ * sata_link_hardreset - reset link via SATA phy reset
+ * @link: link to reset
+ * @timing: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ * @online: optional out parameter indicating link onlineness
+ * @check_ready: optional callback to check link readiness
+ *
+ * SATA phy-reset @link using DET bits of SControl register.
+ * After hardreset, link readiness is waited upon using
+ * ata_wait_ready() if @check_ready is specified.  LLDs are
+ * allowed to not specify @check_ready and wait themselves after
+ * this function returns.  Device classification is LLD's
+ * responsibility.
+ *
+ * *@online is set to one iff reset succeeded and @link is online
+ * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
@@ -2783,73 +3779,123 @@
 * RETURNS:
 * 0 on success, -errno otherwise.
*/ - if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) - return rc; + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + goto out; scontrol = (scontrol & 0x0f0) | 0x304; - if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) - return rc; + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + goto out; - sata_set_spd(ap); + sata_set_spd(link); } /* issue phy wake/reset */ - if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) - return rc; + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + goto out; scontrol = (scontrol & 0x0f0) | 0x301; - if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol))) - return rc; + if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) + goto out; /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 * 10.4.2 says at least 1 ms. */ - msleep(1); + ata_msleep(link->ap, 1); - /* bring phy back */ - sata_phy_resume(ap, timing); + /* bring link back */ + rc = sata_link_resume(link, timing, deadline); + if (rc) + goto out; + /* if link is offline nothing more to do */ + if (ata_phys_link_offline(link)) + goto out; - /* TODO: phy layer with polling, timeouts, etc. */ - if (ata_port_offline(ap)) { - *class = ATA_DEV_NONE; - DPRINTK("EXIT, link offline\n"); - return 0; - } + /* Link is online. From this point, -ENODEV too is an error. */ + if (online) + *online = true; - if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { - ata_port_printk(ap, KERN_ERR, - "COMRESET failed (device not ready)\n"); - return -EIO; + if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { + /* If PMP is supported, we have to do follow-up SRST. + * Some PMPs don't send D2H Reg FIS after hardreset if + * the first port is empty. Wait only for + * ATA_TMOUT_PMP_SRST_WAIT. + */ + if (check_ready) { + unsigned long pmp_deadline; + + pmp_deadline = ata_deadline(jiffies, + ATA_TMOUT_PMP_SRST_WAIT); + if (time_after(pmp_deadline, deadline)) + pmp_deadline = deadline; + ata_wait_ready(link, pmp_deadline, check_ready); + } + rc = -EAGAIN; + goto out; } - ap->ops->dev_select(ap, 0); /* probably unnecessary */ + rc = 0; + if (check_ready) + rc = ata_wait_ready(link, deadline, check_ready); + out: + if (rc && rc != -EAGAIN) { + /* online is set iff link is online && reset succeeded */ + if (online) + *online = false; + ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); + } + DPRINTK("EXIT, rc=%d\n", rc); + return rc; +} - *class = ata_dev_try_classify(ap, 0, NULL); +/** + * sata_std_hardreset - COMRESET w/o waiting or classification + * @link: link to reset + * @class: resulting class of attached device + * @deadline: deadline jiffies for the operation + * + * Standard SATA COMRESET w/o waiting or classification. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 if link offline, -EAGAIN if link online, -errno on errors. + */ +int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + bool online; + int rc; - DPRINTK("EXIT, class=%u\n", *class); - return 0; + /* do hardreset */ + rc = sata_link_hardreset(link, timing, deadline, &online, NULL); + return online ? -EAGAIN : rc; } /** * ata_std_postreset - standard postreset callback - * @ap: the target ata_port + * @link: the target ata_link * @classes: classes of attached devices * * This function is invoked after a successful reset. 
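Stripped of its error handling and PMP follow-up, the COMRESET sequence that sata_link_hardreset() performs above is a short SControl dance. The following is a condensed restatement of the code in this hunk (not new behaviour), with return-code checks omitted:

u32 scontrol;

sata_scr_read(link, SCR_CONTROL, &scontrol);
scontrol = (scontrol & 0x0f0) | 0x301;		/* DET = 1: issue COMRESET */
sata_scr_write_flush(link, SCR_CONTROL, scontrol);
ata_msleep(link->ap, 1);			/* AHCI 1.1 10.4.2: >= 1 ms */
sata_link_resume(link, timing, deadline);	/* DET = 0, then debounce */
/* ...then wait for readiness via ata_wait_ready(), if requested */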
Note that @@ -2859,45 +3905,18 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) * LOCKING: * Kernel thread context (may sleep) */ -void ata_std_postreset(struct ata_port *ap, unsigned int *classes) +void ata_std_postreset(struct ata_link *link, unsigned int *classes) { u32 serror; DPRINTK("ENTER\n"); - /* print link status */ - sata_print_link_status(ap); - - /* clear SError */ - if (sata_scr_read(ap, SCR_ERROR, &serror) == 0) - sata_scr_write(ap, SCR_ERROR, serror); - - /* re-enable interrupts */ - if (!ap->ops->error_handler) { - /* FIXME: hack. create a hook instead */ - if (ap->ioaddr.ctl_addr) - ata_irq_on(ap); - } + /* reset complete, clear SError */ + if (!sata_scr_read(link, SCR_ERROR, &serror)) + sata_scr_write(link, SCR_ERROR, serror); - /* is double-select really necessary? */ - if (classes[0] != ATA_DEV_NONE) - ap->ops->dev_select(ap, 1); - if (classes[1] != ATA_DEV_NONE) - ap->ops->dev_select(ap, 0); - - /* bail out if no device is present */ - if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { - DPRINTK("EXIT, no device\n"); - return; - } - - /* set up device control */ - if (ap->ioaddr.ctl_addr) { - if (ap->flags & ATA_FLAG_MMIO) - writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr); - else - outb(ap->ctl, ap->ioaddr.ctl_addr); - } + /* print link status */ + sata_print_link_status(link); DPRINTK("EXIT\n"); } @@ -2922,38 +3941,29 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, const u16 *new_id) { const u16 *old_id = dev->id; - unsigned char model[2][41], serial[2][21]; - u64 new_n_sectors; + unsigned char model[2][ATA_ID_PROD_LEN + 1]; + unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; if (dev->class != new_class) { - ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", - dev->class, new_class); + ata_dev_info(dev, "class mismatch %d != %d\n", + dev->class, new_class); return 0; } - ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0])); - ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1])); - ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0])); - ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1])); - new_n_sectors = ata_id_n_sectors(new_id); + ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); + ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); + ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); + ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); if (strcmp(model[0], model[1])) { - ata_dev_printk(dev, KERN_INFO, "model number mismatch " - "'%s' != '%s'\n", model[0], model[1]); + ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", + model[0], model[1]); return 0; } if (strcmp(serial[0], serial[1])) { - ata_dev_printk(dev, KERN_INFO, "serial number mismatch " - "'%s' != '%s'\n", serial[0], serial[1]); - return 0; - } - - if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { - ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " - "%llu != %llu\n", - (unsigned long long)dev->n_sectors, - (unsigned long long)new_n_sectors); + ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", + serial[0], serial[1]); return 0; } @@ -2961,9 +3971,9 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, } /** - * ata_dev_revalidate - Revalidate ATA device - * @dev: device to revalidate - * @post_reset: is this revalidation after reset? 
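The model/serial comparison in ata_dev_same_device() above relies on ata_id_c_string(), which undoes the odd on-wire encoding of IDENTIFY strings: two ASCII characters per 16-bit word, high byte first. A simplified sketch of that unpacking (the real helper also trims trailing spaces; the output buffer must hold len + 1 bytes):

static void id_string(const u16 *id, unsigned char *s,
		      unsigned int ofs, unsigned int len)
{
	while (len >= 2) {
		*s++ = id[ofs] >> 8;	/* high byte holds the first char */
		*s++ = id[ofs] & 0xff;
		ofs++;
		len -= 2;
	}
	*s = '\0';
}

/* e.g. id_string(id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN) */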
+ * ata_dev_reread_id - Re-read IDENTIFY data + * @dev: target ATA device + * @readid_flags: read ID flags * * Re-read IDENTIFY page and make sure @dev is still attached to * the port. @@ -2974,116 +3984,436 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, * RETURNS: * 0 on success, negative errno otherwise */ -int ata_dev_revalidate(struct ata_device *dev, int post_reset) +int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) { unsigned int class = dev->class; - u16 *id = (void *)dev->ap->sector_buf; + u16 *id = (void *)dev->link->ap->sector_buf; int rc; - if (!ata_dev_enabled(dev)) { - rc = -ENODEV; - goto fail; - } - /* read ID data */ - rc = ata_dev_read_id(dev, &class, post_reset, id); + rc = ata_dev_read_id(dev, &class, readid_flags, id); if (rc) - goto fail; + return rc; /* is the device still there? */ - if (!ata_dev_same_device(dev, class, id)) { + if (!ata_dev_same_device(dev, class, id)) + return -ENODEV; + + memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); + return 0; +} + +/** + * ata_dev_revalidate - Revalidate ATA device + * @dev: device to revalidate + * @new_class: new class code + * @readid_flags: read ID flags + * + * Re-read IDENTIFY page, make sure @dev is still attached to the + * port and reconfigure it according to the new IDENTIFY page. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, negative errno otherwise + */ +int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, + unsigned int readid_flags) +{ + u64 n_sectors = dev->n_sectors; + u64 n_native_sectors = dev->n_native_sectors; + int rc; + + if (!ata_dev_enabled(dev)) + return -ENODEV; + + /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ + if (ata_class_enabled(new_class) && + new_class != ATA_DEV_ATA && + new_class != ATA_DEV_ATAPI && + new_class != ATA_DEV_SEMB) { + ata_dev_info(dev, "class mismatch %u != %u\n", + dev->class, new_class); rc = -ENODEV; goto fail; } - memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); + /* re-read ID */ + rc = ata_dev_reread_id(dev, readid_flags); + if (rc) + goto fail; /* configure device according to the new ID */ - rc = ata_dev_configure(dev, 0); - if (rc == 0) + rc = ata_dev_configure(dev); + if (rc) + goto fail; + + /* verify n_sectors hasn't changed */ + if (dev->class != ATA_DEV_ATA || !n_sectors || + dev->n_sectors == n_sectors) return 0; + /* n_sectors has changed */ + ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", + (unsigned long long)n_sectors, + (unsigned long long)dev->n_sectors); + + /* + * Something could have caused HPA to be unlocked + * involuntarily. If n_native_sectors hasn't changed and the + * new size matches it, keep the device. + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { + ata_dev_warn(dev, + "new n_sectors matches native, probably " + "late HPA unlock, n_sectors updated\n"); + /* use the larger n_sectors */ + return 0; + } + + /* + * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try + * unlocking HPA in those cases. 
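(The comment continues below.) The three size-change outcomes in ata_dev_revalidate() reduce to comparisons against the native capacity, assuming the native size itself did not change across the re-IDENTIFY. A compact sketch of that decision, with hypothetical names mirroring the code:

	#include <stdio.h>

	enum hpa_verdict { HPA_KEEP, HPA_TRY_UNLOCK, HPA_FAIL };

	/* old_n/new_n: sectors before/after re-IDENTIFY; native: device native size */
	static enum hpa_verdict hpa_verdict_sketch(unsigned long long old_n,
						   unsigned long long new_n,
						   unsigned long long native)
	{
		if (new_n == old_n)
			return HPA_KEEP;	/* nothing changed */
		if (new_n > old_n && new_n == native)
			return HPA_KEEP;	/* late HPA unlock: grew to native */
		if (new_n < old_n && old_n == native)
			return HPA_TRY_UNLOCK;	/* late HPA lock: shrank from native */
		return HPA_FAIL;		/* unexplained size change */
	}

	int main(void)
	{
		/* shrank from native, native unchanged: retry with HPA unlock */
		printf("%d\n", hpa_verdict_sketch(1000, 900, 1000));
		return 0;
	}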
+ * + * https://bugzilla.kernel.org/show_bug.cgi?id=15396 + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors < n_sectors && n_sectors == n_native_sectors && + !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { + ata_dev_warn(dev, + "old n_sectors matches native, probably " + "late HPA lock, will try to unlock HPA\n"); + /* try unlocking HPA */ + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + rc = -EIO; + } else + rc = -ENODEV; + + /* restore original n_[native_]sectors and fail */ + dev->n_native_sectors = n_native_sectors; + dev->n_sectors = n_sectors; fail: - ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); + ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); return rc; } -static const char * const ata_dma_blacklist [] = { - "WDC AC11000H", NULL, - "WDC AC22100H", NULL, - "WDC AC32500H", NULL, - "WDC AC33100H", NULL, - "WDC AC31600H", NULL, - "WDC AC32100H", "24.09P07", - "WDC AC23200L", "21.10N21", - "Compaq CRD-8241B", NULL, - "CRD-8400B", NULL, - "CRD-8480B", NULL, - "CRD-8482B", NULL, - "CRD-84", NULL, - "SanDisk SDP3B", NULL, - "SanDisk SDP3B-64", NULL, - "SANYO CD-ROM CRD", NULL, - "HITACHI CDR-8", NULL, - "HITACHI CDR-8335", NULL, - "HITACHI CDR-8435", NULL, - "Toshiba CD-ROM XM-6202B", NULL, - "TOSHIBA CD-ROM XM-1702BC", NULL, - "CD-532E-A", NULL, - "E-IDE CD-ROM CR-840", NULL, - "CD-ROM Drive/F5A", NULL, - "WPI CDD-820", NULL, - "SAMSUNG CD-ROM SC-148C", NULL, - "SAMSUNG CD-ROM SC", NULL, - "SanDisk SDP3B-64", NULL, - "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, - "_NEC DV5800A", NULL, - "SAMSUNG CD-ROM SN-124", "N001" +struct ata_blacklist_entry { + const char *model_num; + const char *model_rev; + unsigned long horkage; }; -static int ata_strim(char *s, size_t len) +static const struct ata_blacklist_entry ata_device_blacklist [] = { + /* Devices with DMA related problems under Linux */ + { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, + { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, + { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, + { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, + { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, + { "CRD-84", NULL, ATA_HORKAGE_NODMA }, + { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, + { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, + { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, + { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, + { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, + { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, + { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, + { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, + { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, + { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, + { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, + { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, + { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, + { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, + { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, + { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, + { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, + { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, + /* Odd clown on sil3726/4726 PMPs */ + { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, + + /* Weird ATAPI devices */ + { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, + { "QUANTUM DAT DAT72-000", NULL, 
ATA_HORKAGE_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+ /* Devices where NCQ should be avoided */
+ /* NCQ is slow */
+ { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+ /* http://thread.gmane.org/gmane.linux.ide/14907 */
+ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ /* NCQ is broken */
+ { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
+ { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
+ { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
+ { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
+ { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
+
+ /* Seagate NCQ + FLUSH CACHE firmware bug */
+ { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
+
+ /* devices which puke on READ_NATIVE_MAX */
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+
+ /* this one allows HPA unlocking but fails IOs on the area */
+ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
+
+ /* Devices which report 1 sector over size HPA */
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
+
+ /* Devices which get the IVB wrong */
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+ /* Maybe we should just blacklist TSSTcorp... */
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
+
+ /* Devices that do not need bridging limits applied */
+ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
+
+ /* Devices which aren't very happy with higher link speeds */
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
+
+ /*
+ * Devices which choke on SETXFER. Applies only if both the
+ * device and controller are SATA.
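(The comment and table continue below.) Blacklist model and revision strings use a small glob syntax handled by the glob_match() helper defined further down: '?' matches one character, '*' a run, and '[...]' a set or range. A user-space harness, assuming it is compiled together with that helper, shows how the Seagate firmware entries above match:

	#include <stdio.h>

	static int glob_match(const char *text, const char *pattern); /* below */

	int main(void)
	{
		/* 0 means match, per glob_match()'s return convention */
		printf("%d\n", glob_match("ST3640323AS", "ST3640[36]23AS")); /* 0 */
		printf("%d\n", glob_match("SD17",        "SD1[5-9]"));       /* 0 */
		printf("%d\n", glob_match("SD14",        "SD1[5-9]"));       /* 1 */
		return 0;
	}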
+ */ + { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + + /* + * Some WD SATA-I drives spin up and down erratically when the link + * is put into the slumber mode. We don't have full list of the + * affected devices. Disable LPM if the device matches one of the + * known prefixes and is SATA-1. As a side effect LPM partial is + * lost too. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=57211 + */ + { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + + /* End Marker */ + { } +}; + +/** + * glob_match - match a text string against a glob-style pattern + * @text: the string to be examined + * @pattern: the glob-style pattern to be matched against + * + * Either/both of text and pattern can be empty strings. + * + * Match text against a glob-style pattern, with wildcards and simple sets: + * + * ? matches any single character. + * * matches any run of characters. + * [xyz] matches a single character from the set: x, y, or z. + * [a-d] matches a single character from the range: a, b, c, or d. + * [a-d0-9] matches a single character from either range. + * + * The special characters ?, [, -, or *, can be matched using a set, eg. [*] + * Behaviour with malformed patterns is undefined, though generally reasonable. + * + * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" + * + * This function uses one level of recursion per '*' in pattern. + * Since it calls _nothing_ else, and has _no_ explicit local variables, + * this will not cause stack problems for any reasonable use here. + * + * RETURNS: + * 0 on match, 1 otherwise. + */ +static int glob_match (const char *text, const char *pattern) { - len = strnlen(s, len); + do { + /* Match single character or a '?' 
wildcard */ + if (*text == *pattern || *pattern == '?') { + if (!*pattern++) + return 0; /* End of both strings: match */ + } else { + /* Match single char against a '[' bracketed ']' pattern set */ + if (!*text || *pattern != '[') + break; /* Not a pattern set */ + while (*++pattern && *pattern != ']' && *text != *pattern) { + if (*pattern == '-' && *(pattern - 1) != '[') + if (*text > *(pattern - 1) && *text < *(pattern + 1)) { + ++pattern; + break; + } + } + if (!*pattern || *pattern == ']') + return 1; /* No match */ + while (*pattern && *pattern++ != ']'); + } + } while (*++text && *pattern); + + /* Match any run of chars against a '*' wildcard */ + if (*pattern == '*') { + if (!*++pattern) + return 0; /* Match: avoid recursion at end of pattern */ + /* Loop to handle additional pattern chars after the wildcard */ + while (*text) { + if (glob_match(text, pattern) == 0) + return 0; /* Remainder matched */ + ++text; /* Absorb (match) this char and try again */ + } + } + if (!*text && !*pattern) + return 0; /* End of both strings: match */ + return 1; /* No match */ +} - /* ATAPI specifies that empty space is blank-filled; remove blanks */ - while ((len > 0) && (s[len - 1] == ' ')) { - len--; - s[len] = 0; +static unsigned long ata_dev_blacklisted(const struct ata_device *dev) +{ + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; + const struct ata_blacklist_entry *ad = ata_device_blacklist; + + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); + + while (ad->model_num) { + if (!glob_match(model_num, ad->model_num)) { + if (ad->model_rev == NULL) + return ad->horkage; + if (!glob_match(model_rev, ad->model_rev)) + return ad->horkage; + } + ad++; } - return len; + return 0; } static int ata_dma_blacklisted(const struct ata_device *dev) { - unsigned char model_num[40]; - unsigned char model_rev[16]; - unsigned int nlen, rlen; - int i; - /* We don't support polling DMA. * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) * if the LLDD handles only interrupts in the HSM_ST_LAST state. */ - if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) && + if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && (dev->flags & ATA_DFLAG_CDB_INTR)) return 1; + return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; +} + +/** + * ata_is_40wire - check drive side detection + * @dev: device + * + * Perform drive side detection decoding, allowing for device vendors + * who can't follow the documentation. + */ + +static int ata_is_40wire(struct ata_device *dev) +{ + if (dev->horkage & ATA_HORKAGE_IVB) + return ata_drive_40wire_relaxed(dev->id); + return ata_drive_40wire(dev->id); +} + +/** + * cable_is_40wire - 40/80/SATA decider + * @ap: port to consider + * + * This function encapsulates the policy for speed management + * in one place. At the moment we don't cache the result but + * there is a good case for setting ap->cbl to the result when + * we are called with unknown cables (and figuring out if it + * impacts hotplug at all). + * + * Return 1 if the cable appears to be 40 wire. 
+ */ + +static int cable_is_40wire(struct ata_port *ap) +{ + struct ata_link *link; + struct ata_device *dev; - ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, - sizeof(model_num)); - ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS, - sizeof(model_rev)); - nlen = ata_strim(model_num, sizeof(model_num)); - rlen = ata_strim(model_rev, sizeof(model_rev)); - - for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) { - if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) { - if (ata_dma_blacklist[i+1] == NULL) - return 1; - if (!strncmp(ata_dma_blacklist[i], model_rev, rlen)) - return 1; + /* If the controller thinks we are 40 wire, we are. */ + if (ap->cbl == ATA_CBL_PATA40) + return 1; + + /* If the controller thinks we are 80 wire, we are. */ + if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) + return 0; + + /* If the system is known to be 40 wire short cable (eg + * laptop), then we allow 80 wire modes even if the drive + * isn't sure. + */ + if (ap->cbl == ATA_CBL_PATA40_SHORT) + return 0; + + /* If the controller doesn't know, we scan. + * + * Note: We look for all 40 wire detects at this point. Any + * 80 wire detect is taken to be 80 wire cable because + * - in many setups only the one drive (slave if present) will + * give a valid detect + * - if you have a non detect capable drive you don't want it + * to colour the choice + */ + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { + if (!ata_is_40wire(dev)) + return 0; } } - return 0; + return 1; } /** @@ -3100,7 +4430,8 @@ static int ata_dma_blacklisted(const struct ata_device *dev) */ static void ata_dev_xfermask(struct ata_device *dev) { - struct ata_port *ap = dev->ap; + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; struct ata_host *host = ap->host; unsigned long xfer_mask; @@ -3108,12 +4439,7 @@ static void ata_dev_xfermask(struct ata_device *dev) xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, ap->udma_mask); - /* Apply cable rule here. Don't apply it early because when - * we handle hot plug the cable type can itself change. - */ - if (ap->cbl == ATA_CBL_PATA40) - xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); - + /* drive modes available */ xfer_mask &= ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, dev->udma_mask); xfer_mask &= ata_id_xfermask(dev->id); @@ -3131,18 +4457,38 @@ static void ata_dev_xfermask(struct ata_device *dev) if (ata_dma_blacklisted(dev)) { xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - ata_dev_printk(dev, KERN_WARNING, - "device is on DMA blacklist, disabling DMA\n"); + ata_dev_warn(dev, + "device is on DMA blacklist, disabling DMA\n"); } - if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) { + if ((host->flags & ATA_HOST_SIMPLEX) && + host->simplex_claimed && host->simplex_claimed != ap) { xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " - "other device, disabling DMA\n"); + ata_dev_warn(dev, + "simplex DMA is claimed by other device, disabling DMA\n"); } + if (ap->flags & ATA_FLAG_NO_IORDY) + xfer_mask &= ata_pio_mask_no_iordy(dev); + if (ap->ops->mode_filter) - xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); + xfer_mask = ap->ops->mode_filter(dev, xfer_mask); + + /* Apply cable rule here. Don't apply it early because when + * we handle hot plug the cable type can itself change. + * Check this last so that we know if the transfer rate was + * solely limited by the cable. 
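(The xfer-mask comment continues below.) The net effect of the 40-wire rule is to strip UDMA modes above UDMA/33, i.e. bits 3 and up of the UDMA field. A sketch of the mask arithmetic; shift_udma here is illustrative only, the kernel's packing constant being ATA_SHIFT_UDMA from <linux/ata.h>:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int shift_udma = 16;	/* illustrative value */
		unsigned long xfer_mask = 0x7fUL << shift_udma; /* UDMA/16..UDMA/133 */

		/* 40-wire cable: drop UDMA/44 and above (modes 3-7), keep 0-2 */
		xfer_mask &= ~(0xF8UL << shift_udma);

		printf("UDMA modes left: 0x%02lx\n",
		       (xfer_mask >> shift_udma) & 0xff); /* 0x07: UDMA/16,25,33 */
		return 0;
	}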
+ * Unknown or 80 wire cables reported host side are checked
+ * drive side as well. Cases where we know a 40wire cable
+ * is used safely for 80 are not checked here.
+ */
+ if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+ /* UDMA/44 or higher would be available */
+ if (cable_is_40wire(ap)) {
+ ata_dev_warn(dev,
+ "limited to UDMA/33 due to 40-wire cable\n");
+ xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+ }
 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
 &dev->mwdma_mask, &dev->udma_mask);
@@ -3170,18 +4516,65 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 /* set up set-features taskfile */
 DPRINTK("set features - xfer mode\n");
+ /* Some controllers and ATAPI devices show flaky interrupt
+ * behavior after setting xfer mode. Use polling instead.
+ */
 ata_tf_init(dev, &tf);
 tf.command = ATA_CMD_SET_FEATURES;
 tf.feature = SETFEATURES_XFER;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
+ tf.protocol = ATA_PROT_NODATA;
+ /* If we are using IORDY we must send the mode setting command */
+ if (ata_pio_need_iordy(dev))
+ tf.nsect = dev->xfer_mode;
+ /* If the device has IORDY and the controller does not - turn it off */
+ else if (ata_id_has_iordy(dev->id))
+ tf.nsect = 0x01;
+ else /* In the ancient relic department - skip all of this */
+ return 0;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
+
+/**
+ * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
+ * @dev: Device to which command will be sent
+ * @enable: Whether to enable or disable the feature
+ * @feature: feature to set, passed in the sector count field
+ *
+ * Issue SET FEATURES - SATA FEATURES command to device @dev
+ * on port @ap, with the sector count set to @feature
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* set up set-features taskfile */
+ DPRINTK("set features - SATA features\n");
+
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_SET_FEATURES;
+ tf.feature = enable;
 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 tf.protocol = ATA_PROT_NODATA;
- tf.nsect = dev->xfer_mode;
+ tf.nsect = feature;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 DPRINTK("EXIT, err_mask=%x\n", err_mask);
 return err_mask;
}
+EXPORT_SYMBOL_GPL(ata_dev_set_feature);
/**
 * ata_dev_init_params - Issue INIT DEV PARAMS command
@@ -3215,7 +4608,12 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 tf.nsect = sectors;
 tf.device |= (heads - 1) & 0x0f; /* max head = num.
of heads - 1 */ - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + /* A clean abort indicates an original or just out of spec drive + and we should continue as we issue the setup based on the + drive reported working geometry */ + if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + err_mask = 0; DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; @@ -3230,109 +4628,25 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, * LOCKING: * spin_lock_irqsave(host lock) */ - -static void ata_sg_clean(struct ata_queued_cmd *qc) +void ata_sg_clean(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->__sg; + struct scatterlist *sg = qc->sg; int dir = qc->dma_dir; - void *pad_buf = NULL; - - WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); - WARN_ON(sg == NULL); - if (qc->flags & ATA_QCFLAG_SINGLE) - WARN_ON(qc->n_elem > 1); + WARN_ON_ONCE(sg == NULL); VPRINTK("unmapping %u sg elements\n", qc->n_elem); - /* if we padded the buffer out to 32-bit bound, and data - * xfer direction is from-device, we must copy from the - * pad buffer back into the supplied buffer - */ - if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) - pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - - if (qc->flags & ATA_QCFLAG_SG) { - if (qc->n_elem) - dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); - /* restore last sg */ - sg[qc->orig_n_elem - 1].length += qc->pad_len; - if (pad_buf) { - struct scatterlist *psg = &qc->pad_sgent; - void *addr = kmap_atomic(psg->page, KM_IRQ0); - memcpy(addr + psg->offset, pad_buf, qc->pad_len); - kunmap_atomic(addr, KM_IRQ0); - } - } else { - if (qc->n_elem) - dma_unmap_single(ap->dev, - sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), - dir); - /* restore sg */ - sg->length += qc->pad_len; - if (pad_buf) - memcpy(qc->buf_virt + sg->length - qc->pad_len, - pad_buf, qc->pad_len); - } + if (qc->n_elem) + dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); qc->flags &= ~ATA_QCFLAG_DMAMAP; - qc->__sg = NULL; + qc->sg = NULL; } /** - * ata_fill_sg - Fill PCI IDE PRD table - * @qc: Metadata associated with taskfile to be transferred - * - * Fill PCI IDE PRD (scatter-gather) table with segments - * associated with the current disk command. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - */ -static void ata_fill_sg(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int idx; - - WARN_ON(qc->__sg == NULL); - WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); - - idx = 0; - ata_for_each_sg(sg, qc) { - u32 addr, offset; - u32 sg_len, len; - - /* determine if physical DMA addr spans 64K boundary. - * Note h/w doesn't support 64-bit, so we unconditionally - * truncate dma_addr_t to u32. 
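The essential trick of the ata_fill_sg() builder being removed here is that a PRD entry's byte count is 16 bits wide, so any segment crossing a 64 KiB boundary must be split. That splitting loop as a stand-alone sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* Emit (addr, len) PRD-style chunks, none crossing a 64 KiB boundary. */
	static void fill_prd_sketch(uint32_t addr, uint32_t sg_len)
	{
		while (sg_len) {
			uint32_t offset = addr & 0xffff;
			uint32_t len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;	/* stop at the boundary */

			printf("PRD entry: addr=0x%08x len=0x%05x\n", addr, len);
			addr += len;
			sg_len -= len;
		}
	}

	int main(void)
	{
		fill_prd_sketch(0x1fff0, 0x40);	/* splits into 0x10 + 0x30 */
		return 0;
	}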
- */ - addr = (u32) sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - while (sg_len) { - offset = addr & 0xffff; - len = sg_len; - if ((offset + sg_len) > 0x10000) - len = 0x10000 - offset; - - ap->prd[idx].addr = cpu_to_le32(addr); - ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); - - idx++; - sg_len -= len; - addr += len; - } - } - - if (idx) - ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); -} -/** - * ata_check_atapi_dma - Check whether ATAPI DMA can be supported + * atapi_check_dma - Check whether ATAPI DMA can be supported * @qc: Metadata associated with taskfile to check * * Allow low-level driver to filter ATA PACKET commands, returning @@ -3345,65 +4659,55 @@ static void ata_fill_sg(struct ata_queued_cmd *qc) * RETURNS: 0 when ATAPI DMA can be used * nonzero otherwise */ -int ata_check_atapi_dma(struct ata_queued_cmd *qc) +int atapi_check_dma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - int rc = 0; /* Assume ATAPI DMA is OK by default */ - if (ap->ops->check_atapi_dma) - rc = ap->ops->check_atapi_dma(qc); + /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a + * few ATAPI devices choke on such DMA requests. + */ + if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && + unlikely(qc->nbytes & 15)) + return 1; - return rc; -} -/** - * ata_qc_prep - Prepare taskfile for submission - * @qc: Metadata associated with taskfile to be prepared - * - * Prepare ATA taskfile for submission. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_qc_prep(struct ata_queued_cmd *qc) -{ - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + if (ap->ops->check_atapi_dma) + return ap->ops->check_atapi_dma(qc); - ata_fill_sg(qc); + return 0; } -void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } - /** - * ata_sg_init_one - Associate command with memory buffer - * @qc: Command to be associated - * @buf: Memory buffer - * @buflen: Length of memory buffer, in bytes. + * ata_std_qc_defer - Check whether a qc needs to be deferred + * @qc: ATA command in question * - * Initialize the data-related elements of queued_cmd @qc - * to point to a single memory buffer, @buf of byte length @buflen. + * Non-NCQ commands cannot run with any other command, NCQ or + * not. As upper layer only knows the queue depth, we are + * responsible for maintaining exclusion. This function checks + * whether a new command @qc can be issued. * * LOCKING: * spin_lock_irqsave(host lock) + * + * RETURNS: + * ATA_DEFER_* if deferring is needed, 0 otherwise. */ - -void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) +int ata_std_qc_defer(struct ata_queued_cmd *qc) { - struct scatterlist *sg; + struct ata_link *link = qc->dev->link; - qc->flags |= ATA_QCFLAG_SINGLE; - - memset(&qc->sgent, 0, sizeof(qc->sgent)); - qc->__sg = &qc->sgent; - qc->n_elem = 1; - qc->orig_n_elem = 1; - qc->buf_virt = buf; - qc->nbytes = buflen; + if (qc->tf.protocol == ATA_PROT_NCQ) { + if (!ata_tag_valid(link->active_tag)) + return 0; + } else { + if (!ata_tag_valid(link->active_tag) && !link->sactive) + return 0; + } - sg = qc->__sg; - sg_init_one(sg, buf, buflen); + return ATA_DEFER_LINK; } +void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } + /** * ata_sg_init - Associate command with scatter-gather table. 
* @qc: Command to be associated @@ -3417,83 +4721,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) * LOCKING: * spin_lock_irqsave(host lock) */ - void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem) { - qc->flags |= ATA_QCFLAG_SG; - qc->__sg = sg; + qc->sg = sg; qc->n_elem = n_elem; - qc->orig_n_elem = n_elem; -} - -/** - * ata_sg_setup_one - DMA-map the memory buffer associated with a command. - * @qc: Command with memory buffer to be mapped. - * - * DMA-map the memory buffer associated with queued_cmd @qc. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * Zero on success, negative on error. - */ - -static int ata_sg_setup_one(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - int dir = qc->dma_dir; - struct scatterlist *sg = qc->__sg; - dma_addr_t dma_address; - int trim_sg = 0; - - /* we must lengthen transfers to end on a 32-bit boundary */ - qc->pad_len = sg->length & 3; - if (qc->pad_len) { - void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - struct scatterlist *psg = &qc->pad_sgent; - - WARN_ON(qc->dev->class != ATA_DEV_ATAPI); - - memset(pad_buf, 0, ATA_DMA_PAD_SZ); - - if (qc->tf.flags & ATA_TFLAG_WRITE) - memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, - qc->pad_len); - - sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); - sg_dma_len(psg) = ATA_DMA_PAD_SZ; - /* trim sg */ - sg->length -= qc->pad_len; - if (sg->length == 0) - trim_sg = 1; - - DPRINTK("padding done, sg->length=%u pad_len=%u\n", - sg->length, qc->pad_len); - } - - if (trim_sg) { - qc->n_elem--; - goto skip_map; - } - - dma_address = dma_map_single(ap->dev, qc->buf_virt, - sg->length, dir); - if (dma_mapping_error(dma_address)) { - /* restore sg */ - sg->length += qc->pad_len; - return -1; - } - - sg_dma_address(sg) = dma_address; - sg_dma_len(sg) = sg->length; - -skip_map: - DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), - qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); - - return 0; + qc->cursg = qc->sg; } /** @@ -3509,74 +4742,21 @@ skip_map: * Zero on success, negative on error. * */ - static int ata_sg_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->__sg; - struct scatterlist *lsg = &sg[qc->n_elem - 1]; - int n_elem, pre_n_elem, dir, trim_sg = 0; - - VPRINTK("ENTER, ata%u\n", ap->id); - WARN_ON(!(qc->flags & ATA_QCFLAG_SG)); - - /* we must lengthen transfers to end on a 32-bit boundary */ - qc->pad_len = lsg->length & 3; - if (qc->pad_len) { - void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); - struct scatterlist *psg = &qc->pad_sgent; - unsigned int offset; + unsigned int n_elem; - WARN_ON(qc->dev->class != ATA_DEV_ATAPI); - - memset(pad_buf, 0, ATA_DMA_PAD_SZ); - - /* - * psg->page/offset are used to copy to-be-written - * data in this function or read data in ata_sg_clean. 
- */ - offset = lsg->offset + lsg->length - qc->pad_len; - psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); - psg->offset = offset_in_page(offset); - - if (qc->tf.flags & ATA_TFLAG_WRITE) { - void *addr = kmap_atomic(psg->page, KM_IRQ0); - memcpy(pad_buf, addr + psg->offset, qc->pad_len); - kunmap_atomic(addr, KM_IRQ0); - } + VPRINTK("ENTER, ata%u\n", ap->print_id); - sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); - sg_dma_len(psg) = ATA_DMA_PAD_SZ; - /* trim last sg */ - lsg->length -= qc->pad_len; - if (lsg->length == 0) - trim_sg = 1; - - DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", - qc->n_elem - 1, lsg->length, qc->pad_len); - } - - pre_n_elem = qc->n_elem; - if (trim_sg && pre_n_elem) - pre_n_elem--; - - if (!pre_n_elem) { - n_elem = 0; - goto skip_map; - } - - dir = qc->dma_dir; - n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir); - if (n_elem < 1) { - /* restore last sg */ - lsg->length += qc->pad_len; + n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); + if (n_elem < 1) return -1; - } DPRINTK("%d sg elements mapped\n", n_elem); - -skip_map: + qc->orig_n_elem = qc->n_elem; qc->n_elem = n_elem; + qc->flags |= ATA_QCFLAG_DMAMAP; return 0; } @@ -3604,732 +4784,12 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) } /** - * ata_mmio_data_xfer - Transfer data by MMIO - * @adev: device for this I/O - * @buf: data buffer - * @buflen: buffer length - * @write_data: read/write - * - * Transfer data from/to the device data register by MMIO. - * - * LOCKING: - * Inherited from caller. - */ - -void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, - unsigned int buflen, int write_data) -{ - struct ata_port *ap = adev->ap; - unsigned int i; - unsigned int words = buflen >> 1; - u16 *buf16 = (u16 *) buf; - void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; - - /* Transfer multiple of 2 bytes */ - if (write_data) { - for (i = 0; i < words; i++) - writew(le16_to_cpu(buf16[i]), mmio); - } else { - for (i = 0; i < words; i++) - buf16[i] = cpu_to_le16(readw(mmio)); - } - - /* Transfer trailing 1 byte, if any. */ - if (unlikely(buflen & 0x01)) { - u16 align_buf[1] = { 0 }; - unsigned char *trailing_buf = buf + buflen - 1; - - if (write_data) { - memcpy(align_buf, trailing_buf, 1); - writew(le16_to_cpu(align_buf[0]), mmio); - } else { - align_buf[0] = cpu_to_le16(readw(mmio)); - memcpy(trailing_buf, align_buf, 1); - } - } -} - -/** - * ata_pio_data_xfer - Transfer data by PIO - * @adev: device to target - * @buf: data buffer - * @buflen: buffer length - * @write_data: read/write - * - * Transfer data from/to the device data register by PIO. - * - * LOCKING: - * Inherited from caller. - */ - -void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, - unsigned int buflen, int write_data) -{ - struct ata_port *ap = adev->ap; - unsigned int words = buflen >> 1; - - /* Transfer multiple of 2 bytes */ - if (write_data) - outsw(ap->ioaddr.data_addr, buf, words); - else - insw(ap->ioaddr.data_addr, buf, words); - - /* Transfer trailing 1 byte, if any. 
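One subtlety preserved by the simplified ata_sg_setup() above: dma_map_sg() may return fewer entries than it was given (an IOMMU can coalesce them), yet dma_unmap_sg() must be called with the original count, which is why orig_n_elem is saved. A toy illustration with fake map/unmap helpers:

	#include <stdio.h>

	/* Pretend IOMMU: dma_map_sg() may coalesce, returning fewer entries. */
	static int fake_dma_map_sg(int nents)    { return nents - 1; }
	static void fake_dma_unmap_sg(int nents) { printf("unmapping %d\n", nents); }

	int main(void)
	{
		int n_elem = 4;			/* scatterlist entries from SCSI */
		int orig_n_elem = n_elem;	/* saved before mapping, as above */

		n_elem = fake_dma_map_sg(n_elem);
		printf("programming %d DMA descriptors\n", n_elem);

		/* on completion, unmap with the count originally passed to map */
		fake_dma_unmap_sg(orig_n_elem);
		return 0;
	}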
*/ - if (unlikely(buflen & 0x01)) { - u16 align_buf[1] = { 0 }; - unsigned char *trailing_buf = buf + buflen - 1; - - if (write_data) { - memcpy(align_buf, trailing_buf, 1); - outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr); - } else { - align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr)); - memcpy(trailing_buf, align_buf, 1); - } - } -} - -/** - * ata_pio_data_xfer_noirq - Transfer data by PIO - * @adev: device to target - * @buf: data buffer - * @buflen: buffer length - * @write_data: read/write - * - * Transfer data from/to the device data register by PIO. Do the - * transfer with interrupts disabled. - * - * LOCKING: - * Inherited from caller. - */ - -void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, - unsigned int buflen, int write_data) -{ - unsigned long flags; - local_irq_save(flags); - ata_pio_data_xfer(adev, buf, buflen, write_data); - local_irq_restore(flags); -} - - -/** - * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. - * @qc: Command on going - * - * Transfer ATA_SECT_SIZE of data from/to the ATA device. - * - * LOCKING: - * Inherited from caller. - */ - -static void ata_pio_sector(struct ata_queued_cmd *qc) -{ - int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->__sg; - struct ata_port *ap = qc->ap; - struct page *page; - unsigned int offset; - unsigned char *buf; - - if (qc->cursect == (qc->nsect - 1)) - ap->hsm_task_state = HSM_ST_LAST; - - page = sg[qc->cursg].page; - offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; - - /* get the current page and offset */ - page = nth_page(page, (offset >> PAGE_SHIFT)); - offset %= PAGE_SIZE; - - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); - - if (PageHighMem(page)) { - unsigned long flags; - - /* FIXME: use a bounce buffer */ - local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); - - /* do the actual data transfer */ - ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write); - - kunmap_atomic(buf, KM_IRQ0); - local_irq_restore(flags); - } else { - buf = page_address(page); - ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write); - } - - qc->cursect++; - qc->cursg_ofs++; - - if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) { - qc->cursg++; - qc->cursg_ofs = 0; - } -} - -/** - * ata_pio_sectors - Transfer one or many 512-byte sectors. - * @qc: Command on going - * - * Transfer one or many ATA_SECT_SIZE of data from/to the - * ATA device for the DRQ request. - * - * LOCKING: - * Inherited from caller. - */ - -static void ata_pio_sectors(struct ata_queued_cmd *qc) -{ - if (is_multi_taskfile(&qc->tf)) { - /* READ/WRITE MULTIPLE */ - unsigned int nsect; - - WARN_ON(qc->dev->multi_count == 0); - - nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); - while (nsect--) - ata_pio_sector(qc); - } else - ata_pio_sector(qc); -} - -/** - * atapi_send_cdb - Write CDB bytes to hardware - * @ap: Port to which ATAPI device is attached. - * @qc: Taskfile currently active - * - * When device has indicated its readiness to accept - * a CDB, this function is called. Send the CDB. - * - * LOCKING: - * caller. 
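The PIO helpers removed above all share one quirk: the ATA data register is 16 bits wide, so an odd-length buffer needs a zero-padded final word. A user-space sketch of the write side, with reg_write16() standing in for outw()/writew() and assuming a little-endian host:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/* Stand-in for the 16-bit data register. */
	static void reg_write16(uint16_t w) { printf("wrote 0x%04x\n", w); }

	static void pio_write_sketch(const uint8_t *buf, unsigned int buflen)
	{
		unsigned int i, words = buflen >> 1;

		for (i = 0; i < words; i++) {
			uint16_t w;
			memcpy(&w, buf + 2 * i, 2);	/* little-endian host */
			reg_write16(w);
		}
		if (buflen & 1) {
			uint16_t align = 0;	/* high byte padded with zero */
			memcpy(&align, buf + buflen - 1, 1);
			reg_write16(align);
		}
	}

	int main(void)
	{
		const uint8_t data[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
		pio_write_sketch(data, sizeof(data)); /* 0x2211 0x4433 0x0055 */
		return 0;
	}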
- */ - -static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) -{ - /* send SCSI cdb */ - DPRINTK("send cdb\n"); - WARN_ON(qc->dev->cdb_len < 12); - - ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); - ata_altstatus(ap); /* flush */ - - switch (qc->tf.protocol) { - case ATA_PROT_ATAPI: - ap->hsm_task_state = HSM_ST; - break; - case ATA_PROT_ATAPI_NODATA: - ap->hsm_task_state = HSM_ST_LAST; - break; - case ATA_PROT_ATAPI_DMA: - ap->hsm_task_state = HSM_ST_LAST; - /* initiate bmdma */ - ap->ops->bmdma_start(qc); - break; - } -} - -/** - * __atapi_pio_bytes - Transfer data from/to the ATAPI device. - * @qc: Command on going - * @bytes: number of bytes - * - * Transfer Transfer data from/to the ATAPI device. - * - * LOCKING: - * Inherited from caller. - * - */ - -static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) -{ - int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->__sg; - struct ata_port *ap = qc->ap; - struct page *page; - unsigned char *buf; - unsigned int offset, count; - - if (qc->curbytes + bytes >= qc->nbytes) - ap->hsm_task_state = HSM_ST_LAST; - -next_sg: - if (unlikely(qc->cursg >= qc->n_elem)) { - /* - * The end of qc->sg is reached and the device expects - * more data to transfer. In order not to overrun qc->sg - * and fulfill length specified in the byte count register, - * - for read case, discard trailing data from the device - * - for write case, padding zero data to the device - */ - u16 pad_buf[1] = { 0 }; - unsigned int words = bytes >> 1; - unsigned int i; - - if (words) /* warning if bytes > 1 */ - ata_dev_printk(qc->dev, KERN_WARNING, - "%u bytes trailing data\n", bytes); - - for (i = 0; i < words; i++) - ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write); - - ap->hsm_task_state = HSM_ST_LAST; - return; - } - - sg = &qc->__sg[qc->cursg]; - - page = sg->page; - offset = sg->offset + qc->cursg_ofs; - - /* get the current page and offset */ - page = nth_page(page, (offset >> PAGE_SHIFT)); - offset %= PAGE_SIZE; - - /* don't overrun current sg */ - count = min(sg->length - qc->cursg_ofs, bytes); - - /* don't cross page boundaries */ - count = min(count, (unsigned int)PAGE_SIZE - offset); - - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); - - if (PageHighMem(page)) { - unsigned long flags; - - /* FIXME: use bounce buffer */ - local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); - - /* do the actual data transfer */ - ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); - - kunmap_atomic(buf, KM_IRQ0); - local_irq_restore(flags); - } else { - buf = page_address(page); - ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); - } - - bytes -= count; - qc->curbytes += count; - qc->cursg_ofs += count; - - if (qc->cursg_ofs == sg->length) { - qc->cursg++; - qc->cursg_ofs = 0; - } - - if (bytes) - goto next_sg; -} - -/** - * atapi_pio_bytes - Transfer data from/to the ATAPI device. - * @qc: Command on going - * - * Transfer Transfer data from/to the ATAPI device. - * - * LOCKING: - * Inherited from caller. - */ - -static void atapi_pio_bytes(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct ata_device *dev = qc->dev; - unsigned int ireason, bc_lo, bc_hi, bytes; - int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; - - /* Abuse qc->result_tf for temp storage of intermediate TF - * here to save some kernel stack usage. - * For normal completion, qc->result_tf is not relevant. 
For - * error, qc->result_tf is later overwritten by ata_qc_complete(). - * So, the correctness of qc->result_tf is not affected. - */ - ap->ops->tf_read(ap, &qc->result_tf); - ireason = qc->result_tf.nsect; - bc_lo = qc->result_tf.lbam; - bc_hi = qc->result_tf.lbah; - bytes = (bc_hi << 8) | bc_lo; - - /* shall be cleared to zero, indicating xfer of data */ - if (ireason & (1 << 0)) - goto err_out; - - /* make sure transfer direction matches expected */ - i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; - if (do_write != i_write) - goto err_out; - - VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); - - __atapi_pio_bytes(qc, bytes); - - return; - -err_out: - ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n"); - qc->err_mask |= AC_ERR_HSM; - ap->hsm_task_state = HSM_ST_ERR; -} - -/** - * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. - * @ap: the target ata_port - * @qc: qc on going - * - * RETURNS: - * 1 if ok in workqueue, 0 otherwise. - */ - -static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) -{ - if (qc->tf.flags & ATA_TFLAG_POLLING) - return 1; - - if (ap->hsm_task_state == HSM_ST_FIRST) { - if (qc->tf.protocol == ATA_PROT_PIO && - (qc->tf.flags & ATA_TFLAG_WRITE)) - return 1; - - if (is_atapi_taskfile(&qc->tf) && - !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - return 1; - } - - return 0; -} - -/** - * ata_hsm_qc_complete - finish a qc running on standard HSM - * @qc: Command to complete - * @in_wq: 1 if called from workqueue, 0 otherwise - * - * Finish @qc which is running on standard HSM. - * - * LOCKING: - * If @in_wq is zero, spin_lock_irqsave(host lock). - * Otherwise, none on entry and grabs host lock. - */ -static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) -{ - struct ata_port *ap = qc->ap; - unsigned long flags; - - if (ap->ops->error_handler) { - if (in_wq) { - spin_lock_irqsave(ap->lock, flags); - - /* EH might have kicked in while host lock is - * released. - */ - qc = ata_qc_from_tag(ap, qc->tag); - if (qc) { - if (likely(!(qc->err_mask & AC_ERR_HSM))) { - ata_irq_on(ap); - ata_qc_complete(qc); - } else - ata_port_freeze(ap); - } - - spin_unlock_irqrestore(ap->lock, flags); - } else { - if (likely(!(qc->err_mask & AC_ERR_HSM))) - ata_qc_complete(qc); - else - ata_port_freeze(ap); - } - } else { - if (in_wq) { - spin_lock_irqsave(ap->lock, flags); - ata_irq_on(ap); - ata_qc_complete(qc); - spin_unlock_irqrestore(ap->lock, flags); - } else - ata_qc_complete(qc); - } - - ata_altstatus(ap); /* flush */ -} - -/** - * ata_hsm_move - move the HSM to the next state. - * @ap: the target ata_port - * @qc: qc on going - * @status: current device status - * @in_wq: 1 if called from workqueue, 0 otherwise - * - * RETURNS: - * 1 when poll next status needed, 0 otherwise. - */ -int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, - u8 status, int in_wq) -{ - unsigned long flags = 0; - int poll_next; - - WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); - - /* Make sure ata_qc_issue_prot() does not throw things - * like DMA polling into the workqueue. Notice that - * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). - */ - WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); - -fsm_start: - DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", - ap->id, qc->tf.protocol, ap->hsm_task_state, status); - - switch (ap->hsm_task_state) { - case HSM_ST_FIRST: - /* Send first data block or PACKET CDB */ - - /* If polling, we will stay in the work queue after - * sending the data. 
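For reference, the ATAPI interrupt-reason byte checked in atapi_pio_bytes() above carries two flags: bit 0 (CoD) must be clear during a data phase, and bit 1 (I/O) gives the direction from the device's point of view. A tiny decoder sketch:

	#include <stdio.h>

	static void decode_ireason(unsigned char ireason)
	{
		int cod = ireason & (1 << 0);	/* 1 = command/message, 0 = data */
		int io  = ireason & (1 << 1);	/* 1 = device-to-host */

		if (cod)
			printf("not a data phase - HSM violation for PIO data\n");
		else
			printf("data phase, host should %s\n",
			       io ? "read" : "write");
	}

	int main(void)
	{
		decode_ireason(0x00);	/* data, host writes to device */
		decode_ireason(0x02);	/* data, host reads from device */
		decode_ireason(0x01);	/* CDB/message phase */
		return 0;
	}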
Otherwise, interrupt handler - * takes over after sending the data. - */ - poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); - - /* check device status */ - if (unlikely((status & ATA_DRQ) == 0)) { - /* handle BSY=0, DRQ=0 as error */ - if (likely(status & (ATA_ERR | ATA_DF))) - /* device stops HSM for abort/error */ - qc->err_mask |= AC_ERR_DEV; - else - /* HSM violation. Let EH handle this */ - qc->err_mask |= AC_ERR_HSM; - - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - /* Device should not ask for data transfer (DRQ=1) - * when it finds something wrong. - * We ignore DRQ here and stop the HSM by - * changing hsm_task_state to HSM_ST_ERR and - * let the EH abort the command or reset the device. - */ - if (unlikely(status & (ATA_ERR | ATA_DF))) { - printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", - ap->id, status); - qc->err_mask |= AC_ERR_HSM; - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - /* Send the CDB (atapi) or the first data block (ata pio out). - * During the state transition, interrupt handler shouldn't - * be invoked before the data transfer is complete and - * hsm_task_state is changed. Hence, the following locking. - */ - if (in_wq) - spin_lock_irqsave(ap->lock, flags); - - if (qc->tf.protocol == ATA_PROT_PIO) { - /* PIO data out protocol. - * send first data block. - */ - - /* ata_pio_sectors() might change the state - * to HSM_ST_LAST. so, the state is changed here - * before ata_pio_sectors(). - */ - ap->hsm_task_state = HSM_ST; - ata_pio_sectors(qc); - ata_altstatus(ap); /* flush */ - } else - /* send CDB */ - atapi_send_cdb(ap, qc); - - if (in_wq) - spin_unlock_irqrestore(ap->lock, flags); - - /* if polling, ata_pio_task() handles the rest. - * otherwise, interrupt handler takes over from here. - */ - break; - - case HSM_ST: - /* complete command or read/write the data register */ - if (qc->tf.protocol == ATA_PROT_ATAPI) { - /* ATAPI PIO protocol */ - if ((status & ATA_DRQ) == 0) { - /* No more data to transfer or device error. - * Device error will be tagged in HSM_ST_LAST. - */ - ap->hsm_task_state = HSM_ST_LAST; - goto fsm_start; - } - - /* Device should not ask for data transfer (DRQ=1) - * when it finds something wrong. - * We ignore DRQ here and stop the HSM by - * changing hsm_task_state to HSM_ST_ERR and - * let the EH abort the command or reset the device. - */ - if (unlikely(status & (ATA_ERR | ATA_DF))) { - printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", - ap->id, status); - qc->err_mask |= AC_ERR_HSM; - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - atapi_pio_bytes(qc); - - if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) - /* bad ireason reported by device */ - goto fsm_start; - - } else { - /* ATA PIO protocol */ - if (unlikely((status & ATA_DRQ) == 0)) { - /* handle BSY=0, DRQ=0 as error */ - if (likely(status & (ATA_ERR | ATA_DF))) - /* device stops HSM for abort/error */ - qc->err_mask |= AC_ERR_DEV; - else - /* HSM violation. Let EH handle this */ - qc->err_mask |= AC_ERR_HSM; - - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - /* For PIO reads, some devices may ask for - * data transfer (DRQ=1) alone with ERR=1. - * We respect DRQ here and transfer one - * block of junk data before changing the - * hsm_task_state to HSM_ST_ERR. - * - * For PIO writes, ERR=1 DRQ=1 doesn't make - * sense since the data block has been - * transferred to the device. 
- */ - if (unlikely(status & (ATA_ERR | ATA_DF))) { - /* data might be corrputed */ - qc->err_mask |= AC_ERR_DEV; - - if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { - ata_pio_sectors(qc); - ata_altstatus(ap); - status = ata_wait_idle(ap); - } - - if (status & (ATA_BUSY | ATA_DRQ)) - qc->err_mask |= AC_ERR_HSM; - - /* ata_pio_sectors() might change the - * state to HSM_ST_LAST. so, the state - * is changed after ata_pio_sectors(). - */ - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - ata_pio_sectors(qc); - - if (ap->hsm_task_state == HSM_ST_LAST && - (!(qc->tf.flags & ATA_TFLAG_WRITE))) { - /* all data read */ - ata_altstatus(ap); - status = ata_wait_idle(ap); - goto fsm_start; - } - } - - ata_altstatus(ap); /* flush */ - poll_next = 1; - break; - - case HSM_ST_LAST: - if (unlikely(!ata_ok(status))) { - qc->err_mask |= __ac_err_mask(status); - ap->hsm_task_state = HSM_ST_ERR; - goto fsm_start; - } - - /* no more data to transfer */ - DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", - ap->id, qc->dev->devno, status); - - WARN_ON(qc->err_mask); - - ap->hsm_task_state = HSM_ST_IDLE; - - /* complete taskfile transaction */ - ata_hsm_qc_complete(qc, in_wq); - - poll_next = 0; - break; - - case HSM_ST_ERR: - /* make sure qc->err_mask is available to - * know what's wrong and recover - */ - WARN_ON(qc->err_mask == 0); - - ap->hsm_task_state = HSM_ST_IDLE; - - /* complete taskfile transaction */ - ata_hsm_qc_complete(qc, in_wq); - - poll_next = 0; - break; - default: - poll_next = 0; - BUG(); - } - - return poll_next; -} - -static void ata_pio_task(void *_data) -{ - struct ata_queued_cmd *qc = _data; - struct ata_port *ap = qc->ap; - u8 status; - int poll_next; - -fsm_start: - WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); - - /* - * This is purely heuristic. This is a fast path. - * Sometimes when we enter, BSY will be cleared in - * a chk-status or two. If not, the drive is probably seeking - * or something. Snooze for a couple msecs, then - * chk-status again. If still busy, queue delayed work. - */ - status = ata_busy_wait(ap, ATA_BUSY, 5); - if (status & ATA_BUSY) { - msleep(2); - status = ata_busy_wait(ap, ATA_BUSY, 10); - if (status & ATA_BUSY) { - ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE); - return; - } - } - - /* move the HSM */ - poll_next = ata_hsm_move(ap, qc, status, 1); - - /* another command or interrupt handler - * may be running at this point. - */ - if (poll_next) - goto fsm_start; -} - -/** * ata_qc_new - Request an available ATA command, for queueing - * @ap: Port associated with device @dev - * @dev: Device from whom we request an available command structure + * @ap: target port + * + * Some ATA host controllers may implement a queue depth which is less + * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond + * the hardware limitation. * * LOCKING: * None. @@ -4338,21 +4798,27 @@ fsm_start: static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) { struct ata_queued_cmd *qc = NULL; - unsigned int i; + unsigned int max_queue = ap->host->n_tags; + unsigned int i, tag; /* no command while frozen */ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) return NULL; - /* the last tag is reserved for internal command. */ - for (i = 0; i < ATA_MAX_QUEUE - 1; i++) - if (!test_and_set_bit(i, &ap->qc_allocated)) { - qc = __ata_qc_from_tag(ap, i); + for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { + tag = tag < max_queue ? tag : 0; + + /* the last tag is reserved for internal command. 
*/ + if (tag == ATA_TAG_INTERNAL) + continue; + + if (!test_and_set_bit(tag, &ap->qc_allocated)) { + qc = __ata_qc_from_tag(ap, tag); + qc->tag = tag; + ap->last_tag = tag; break; } - - if (qc) - qc->tag = i; + } return qc; } @@ -4367,7 +4833,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) { - struct ata_port *ap = dev->ap; + struct ata_port *ap = dev->link->ap; struct ata_queued_cmd *qc; qc = ata_qc_new(ap); @@ -4394,10 +4860,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) */ void ata_qc_free(struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; + struct ata_port *ap; unsigned int tag; - WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + ap = qc->ap; qc->flags = 0; tag = qc->tag; @@ -4409,19 +4876,31 @@ void ata_qc_free(struct ata_queued_cmd *qc) void __ata_qc_complete(struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; + struct ata_port *ap; + struct ata_link *link; - WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); + ap = qc->ap; + link = qc->dev->link; if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) ata_sg_clean(qc); /* command should be marked inactive atomically with qc completion */ - if (qc->tf.protocol == ATA_PROT_NCQ) - ap->sactive &= ~(1 << qc->tag); - else - ap->active_tag = ATA_TAG_POISON; + if (qc->tf.protocol == ATA_PROT_NCQ) { + link->sactive &= ~(1 << qc->tag); + if (!link->sactive) + ap->nr_active_links--; + } else { + link->active_tag = ATA_TAG_POISON; + ap->nr_active_links--; + } + + /* clear exclusive status */ + if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && + ap->excl_link == link)) + ap->excl_link = NULL; /* atapi: mark qc as inactive to prevent the interrupt handler * from completing the command twice later, before the error handler @@ -4434,13 +4913,38 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) qc->complete_fn(qc); } +static void fill_result_tf(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + qc->result_tf.flags = qc->tf.flags; + ap->ops->qc_fill_rtf(qc); +} + +static void ata_verify_xfer(struct ata_queued_cmd *qc) +{ + struct ata_device *dev = qc->dev; + + if (ata_is_nodata(qc->tf.protocol)) + return; + + if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) + return; + + dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; +} + /** * ata_qc_complete - Complete an active ATA command * @qc: Command to complete - * @err_mask: ATA Status register contents * - * Indicate to the mid and upper layers that an ATA - * command has completed, with either an ok or not-ok status. + * Indicate to the mid and upper layers that an ATA command has + * completed, with either an ok or not-ok status. + * + * Refrain from calling this function multiple times when + * successfully completing multiple NCQ commands. + * ata_qc_complete_multiple() should be used instead, which will + * properly update IRQ expect state. * * LOCKING: * spin_lock_irqsave(host lock) @@ -4463,23 +4967,61 @@ void ata_qc_complete(struct ata_queued_cmd *qc) * taken care of. 
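The tag allocator rewritten above searches round-robin from just after the last tag handed out, wrapping modulo the queue depth and skipping the reserved internal tag. A user-space sketch (the real code uses test_and_set_bit() for atomicity, dropped here):

	#include <stdio.h>

	#define MAX_QUEUE	32
	#define TAG_INTERNAL	(MAX_QUEUE - 1)	/* reserved, never handed out */

	static unsigned long qc_allocated;
	static unsigned int last_tag;

	static int alloc_tag_sketch(void)
	{
		unsigned int i, tag;

		for (i = 0, tag = last_tag + 1; i < MAX_QUEUE; i++, tag++) {
			tag = tag < MAX_QUEUE ? tag : 0;	/* wrap around */
			if (tag == TAG_INTERNAL)
				continue;
			if (!(qc_allocated & (1UL << tag))) {
				qc_allocated |= 1UL << tag;
				last_tag = tag;
				return tag;
			}
		}
		return -1;	/* all tags busy */
	}

	int main(void)
	{
		printf("%d %d %d\n", alloc_tag_sketch(), alloc_tag_sketch(),
		       alloc_tag_sketch());	/* 1 2 3: round-robin */
		return 0;
	}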
*/
 if (ap->ops->error_handler) {
- WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
 if (unlikely(qc->err_mask))
 qc->flags |= ATA_QCFLAG_FAILED;
+ /*
+ * Finish internal commands without any further processing
+ * and always with the result TF filled.
+ */
+ if (unlikely(ata_tag_internal(qc->tag))) {
+ fill_result_tf(qc);
+ __ata_qc_complete(qc);
+ return;
+ }
+
+ /*
+ * Non-internal qc has failed. Fill the result TF and
+ * summon EH.
+ */
 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
- if (!ata_tag_internal(qc->tag)) {
- /* always fill result TF for failed qc */
- ap->ops->tf_read(ap, &qc->result_tf);
- ata_qc_schedule_eh(qc);
- return;
- }
+ fill_result_tf(qc);
+ ata_qc_schedule_eh(qc);
+ return;
 }
+ WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
+
 /* read result TF if requested */
 if (qc->flags & ATA_QCFLAG_RESULT_TF)
- ap->ops->tf_read(ap, &qc->result_tf);
+ fill_result_tf(qc);
+
+ /* Some commands need post-processing after successful
+ * completion.
+ */
+ switch (qc->tf.command) {
+ case ATA_CMD_SET_FEATURES:
+ if (qc->tf.feature != SETFEATURES_WC_ON &&
+ qc->tf.feature != SETFEATURES_WC_OFF)
+ break;
+ /* fall through */
+ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+ case ATA_CMD_SET_MULTI: /* multi_count changed */
+ /* revalidate device */
+ ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
+ break;
+
+ case ATA_CMD_SLEEP:
+ dev->flags |= ATA_DFLAG_SLEEPING;
+ break;
+ }
+
+ if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+ ata_verify_xfer(qc);
 __ata_qc_complete(qc);
 } else {
@@ -4488,7 +5030,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 /* read result TF if failed or requested */
 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
- ap->ops->tf_read(ap, &qc->result_tf);
+ fill_result_tf(qc);
 __ata_qc_complete(qc);
 }
@@ -4498,75 +5040,50 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
- * @finish_qc: LLDD callback invoked before completing a qc
 *
 * Complete in-flight commands. This function is meant to be
 * called from a low-level driver's interrupt routine to complete
 * requests normally. ap->qc_active and @qc_active are compared
 * and commands are completed accordingly.
 *
+ * Always use this function when completing multiple NCQ commands
+ * from IRQ handlers instead of calling ata_qc_complete()
+ * multiple times to keep IRQ expect status properly in sync.
+ *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
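(The kerneldoc continues below.) The rewritten completion loop derives done_mask by XOR-ing the old and new active masks, rejects impossible transitions, then peels off one finished tag at a time with __ffs(). A user-space rendition, using GCC's __builtin_ctz() as a stand-in for __ffs():

	#include <stdio.h>

	int main(void)
	{
		unsigned int qc_active_old = 0x2d;	/* tags 0,2,3,5 in flight */
		unsigned int qc_active_new = 0x05;	/* tags 0,2 still in flight */
		unsigned int done_mask = qc_active_old ^ qc_active_new;

		if (done_mask & qc_active_new) {
			/* a tag appeared out of nowhere */
			printf("illegal qc_active transition\n");
			return 1;
		}
		while (done_mask) {
			unsigned int tag = __builtin_ctz(done_mask);

			printf("completing tag %u\n", tag);	/* 3, then 5 */
			done_mask &= ~(1U << tag);
		}
		return 0;
	}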
*/ -int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, - void (*finish_qc)(struct ata_queued_cmd *)) +int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) { int nr_done = 0; u32 done_mask; - int i; done_mask = ap->qc_active ^ qc_active; if (unlikely(done_mask & qc_active)) { - ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " - "(%08x->%08x)\n", ap->qc_active, qc_active); + ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", + ap->qc_active, qc_active); return -EINVAL; } - for (i = 0; i < ATA_MAX_QUEUE; i++) { + while (done_mask) { struct ata_queued_cmd *qc; + unsigned int tag = __ffs(done_mask); - if (!(done_mask & (1 << i))) - continue; - - if ((qc = ata_qc_from_tag(ap, i))) { - if (finish_qc) - finish_qc(qc); + qc = ata_qc_from_tag(ap, tag); + if (qc) { ata_qc_complete(qc); nr_done++; } + done_mask &= ~(1 << tag); } return nr_done; } -static inline int ata_should_dma_map(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - - switch (qc->tf.protocol) { - case ATA_PROT_NCQ: - case ATA_PROT_DMA: - case ATA_PROT_ATAPI_DMA: - return 1; - - case ATA_PROT_ATAPI: - case ATA_PROT_PIO: - if (ap->flags & ATA_FLAG_PIO_DMA) - return 1; - - /* fall through */ - - default: - return 0; - } - - /* never reached */ -} - /** * ata_qc_issue - issue taskfile to device * @qc: command to issue to device @@ -4582,34 +5099,50 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc) void ata_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; + u8 prot = qc->tf.protocol; /* Make sure only one non-NCQ command is outstanding. The * check is skipped for old EH because it reuses active qc to * request ATAPI sense. */ - WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag)); + WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); - if (qc->tf.protocol == ATA_PROT_NCQ) { - WARN_ON(ap->sactive & (1 << qc->tag)); - ap->sactive |= 1 << qc->tag; + if (ata_is_ncq(prot)) { + WARN_ON_ONCE(link->sactive & (1 << qc->tag)); + + if (!link->sactive) + ap->nr_active_links++; + link->sactive |= 1 << qc->tag; } else { - WARN_ON(ap->sactive); - ap->active_tag = qc->tag; + WARN_ON_ONCE(link->sactive); + + ap->nr_active_links++; + link->active_tag = qc->tag; } qc->flags |= ATA_QCFLAG_ACTIVE; ap->qc_active |= 1 << qc->tag; - if (ata_should_dma_map(qc)) { - if (qc->flags & ATA_QCFLAG_SG) { - if (ata_sg_setup(qc)) - goto sg_err; - } else if (qc->flags & ATA_QCFLAG_SINGLE) { - if (ata_sg_setup_one(qc)) - goto sg_err; - } - } else { - qc->flags &= ~ATA_QCFLAG_DMAMAP; + /* + * We guarantee to LLDs that they will have at least one + * non-zero sg if the command is a data command. 
+ */ + if (WARN_ON_ONCE(ata_is_data(prot) && + (!qc->sg || !qc->n_elem || !qc->nbytes))) + goto sys_err; + + if (ata_is_dma(prot) || (ata_is_pio(prot) && + (ap->flags & ATA_FLAG_PIO_DMA))) + if (ata_sg_setup(qc)) + goto sys_err; + + /* if device is sleeping, schedule reset and abort the link */ + if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { + link->eh_info.action |= ATA_EH_RESET; + ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); + ata_link_abort(link); + return; } ap->ops->qc_prep(qc); @@ -4619,692 +5152,555 @@ void ata_qc_issue(struct ata_queued_cmd *qc) goto err; return; -sg_err: - qc->flags &= ~ATA_QCFLAG_DMAMAP; +sys_err: qc->err_mask |= AC_ERR_SYSTEM; err: ata_qc_complete(qc); } /** - * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner - * @qc: command to issue to device - * - * Using various libata functions and hooks, this function - * starts an ATA command. ATA commands are grouped into - * classes called "protocols", and issuing each type of protocol - * is slightly different. + * sata_scr_valid - test whether SCRs are accessible + * @link: ATA link to test SCR accessibility for * - * May be used as the qc_issue() entry in ata_port_operations. + * Test whether SCRs are accessible for @link. * * LOCKING: - * spin_lock_irqsave(host lock) + * None. * * RETURNS: - * Zero on success, AC_ERR_* mask on failure + * 1 if SCRs are accessible, 0 otherwise. */ - -unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) +int sata_scr_valid(struct ata_link *link) { - struct ata_port *ap = qc->ap; - - /* Use polling pio if the LLD doesn't handle - * interrupt driven pio and atapi CDB interrupt. - */ - if (ap->flags & ATA_FLAG_PIO_POLLING) { - switch (qc->tf.protocol) { - case ATA_PROT_PIO: - case ATA_PROT_ATAPI: - case ATA_PROT_ATAPI_NODATA: - qc->tf.flags |= ATA_TFLAG_POLLING; - break; - case ATA_PROT_ATAPI_DMA: - if (qc->dev->flags & ATA_DFLAG_CDB_INTR) - /* see ata_dma_blacklisted() */ - BUG(); - break; - default: - break; - } - } - - /* select the device */ - ata_dev_select(ap, qc->dev->devno, 1, 0); - - /* start the command */ - switch (qc->tf.protocol) { - case ATA_PROT_NODATA: - if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_qc_set_polling(qc); - - ata_tf_to_host(ap, &qc->tf); - ap->hsm_task_state = HSM_ST_LAST; + struct ata_port *ap = link->ap; - if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_port_queue_task(ap, ata_pio_task, qc, 0); - - break; - - case ATA_PROT_DMA: - WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->ops->bmdma_start(qc); /* initiate bmdma */ - ap->hsm_task_state = HSM_ST_LAST; - break; - - case ATA_PROT_PIO: - if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_qc_set_polling(qc); - - ata_tf_to_host(ap, &qc->tf); - - if (qc->tf.flags & ATA_TFLAG_WRITE) { - /* PIO data out protocol */ - ap->hsm_task_state = HSM_ST_FIRST; - ata_port_queue_task(ap, ata_pio_task, qc, 0); - - /* always send first data block using - * the ata_pio_task() codepath. - */ - } else { - /* PIO data in protocol */ - ap->hsm_task_state = HSM_ST; - - if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_port_queue_task(ap, ata_pio_task, qc, 0); - - /* if polling, ata_pio_task() handles the rest. - * otherwise, interrupt handler takes over from here. 
- */ - } - - break; - - case ATA_PROT_ATAPI: - case ATA_PROT_ATAPI_NODATA: - if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_qc_set_polling(qc); - - ata_tf_to_host(ap, &qc->tf); - - ap->hsm_task_state = HSM_ST_FIRST; - - /* send cdb by polling if no cdb interrupt */ - if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || - (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_port_queue_task(ap, ata_pio_task, qc, 0); - break; - - case ATA_PROT_ATAPI_DMA: - WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->hsm_task_state = HSM_ST_FIRST; - - /* send cdb by polling if no cdb interrupt */ - if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_port_queue_task(ap, ata_pio_task, qc, 0); - break; - - default: - WARN_ON(1); - return AC_ERR_SYSTEM; - } - - return 0; + return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; } /** - * ata_host_intr - Handle host interrupt for given (port, task) - * @ap: Port on which interrupt arrived (possibly...) - * @qc: Taskfile currently active in engine + * sata_scr_read - read SCR register of the specified port + * @link: ATA link to read SCR for + * @reg: SCR to read + * @val: Place to store read value * - * Handle host interrupt for given queued command. Currently, - * only DMA interrupts are handled. All other commands are - * handled via polling with interrupts disabled (nIEN bit). + * Read SCR register @reg of @link into *@val. This function is + * guaranteed to succeed if @link is ap->link, the cable type of + * the port is SATA and the port implements ->scr_read. * * LOCKING: - * spin_lock_irqsave(host lock) + * None if @link is ap->link. Kernel thread context otherwise. * * RETURNS: - * One if interrupt was handled, zero if not (shared irq). + * 0 on success, negative errno on failure. */ - -inline unsigned int ata_host_intr (struct ata_port *ap, - struct ata_queued_cmd *qc) +int sata_scr_read(struct ata_link *link, int reg, u32 *val) { - u8 status, host_stat = 0; - - VPRINTK("ata%u: protocol %d task_state %d\n", - ap->id, qc->tf.protocol, ap->hsm_task_state); - - /* Check whether we are expecting interrupt in this state */ - switch (ap->hsm_task_state) { - case HSM_ST_FIRST: - /* Some pre-ATAPI-4 devices assert INTRQ - * at this state when ready to receive CDB. - */ - - /* Check the ATA_DFLAG_CDB_INTR flag is enough here. - * The flag was turned on only for atapi devices. - * No need to check is_atapi_taskfile(&qc->tf) again. - */ - if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - goto idle_irq; - break; - case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATA_PROT_ATAPI_DMA) { - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); - - /* if it's not our irq... 
*/ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); - - if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ - qc->err_mask |= AC_ERR_HOST_BUS; - ap->hsm_task_state = HSM_ST_ERR; - } - } - break; - case HSM_ST: - break; - default: - goto idle_irq; + if (ata_is_host_link(link)) { + if (sata_scr_valid(link)) + return link->ap->ops->scr_read(link, reg, val); + return -EOPNOTSUPP; } - /* check altstatus */ - status = ata_altstatus(ap); - if (status & ATA_BUSY) - goto idle_irq; - - /* check main status, clearing INTRQ */ - status = ata_chk_status(ap); - if (unlikely(status & ATA_BUSY)) - goto idle_irq; - - /* ack bmdma irq events */ - ap->ops->irq_clear(ap); - - ata_hsm_move(ap, qc, status, 0); - return 1; /* irq handled */ - -idle_irq: - ap->stats.idle_irq++; - -#ifdef ATA_IRQ_TRAP - if ((ap->stats.idle_irq % 1000) == 0) { - ata_irq_ack(ap, 0); /* debug trap */ - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); - return 1; - } -#endif - return 0; /* irq not handled */ + return sata_pmp_scr_read(link, reg, val); } /** - * ata_interrupt - Default ATA host interrupt handler - * @irq: irq line (unused) - * @dev_instance: pointer to our ata_host information structure - * @regs: unused + * sata_scr_write - write SCR register of the specified port + * @link: ATA link to write SCR for + * @reg: SCR to write + * @val: value to write * - * Default interrupt handler for PCI IDE devices. Calls - * ata_host_intr() for each port that is not disabled. + * Write @val to SCR register @reg of @link. This function is + * guaranteed to succeed if @link is ap->link, the cable type of + * the port is SATA and the port implements ->scr_read. * * LOCKING: - * Obtains host lock during operation. + * None if @link is ap->link. Kernel thread context otherwise. * * RETURNS: - * IRQ_NONE or IRQ_HANDLED. + * 0 on success, negative errno on failure. */ - -irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +int sata_scr_write(struct ata_link *link, int reg, u32 val) { - struct ata_host *host = dev_instance; - unsigned int i; - unsigned int handled = 0; - unsigned long flags; - - /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ - spin_lock_irqsave(&host->lock, flags); - - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; - - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && - (qc->flags & ATA_QCFLAG_ACTIVE)) - handled |= ata_host_intr(ap, qc); - } + if (ata_is_host_link(link)) { + if (sata_scr_valid(link)) + return link->ap->ops->scr_write(link, reg, val); + return -EOPNOTSUPP; } - spin_unlock_irqrestore(&host->lock, flags); - - return IRQ_RETVAL(handled); + return sata_pmp_scr_write(link, reg, val); } /** - * sata_scr_valid - test whether SCRs are accessible - * @ap: ATA port to test SCR accessibility for + * sata_scr_write_flush - write SCR register of the specified port and flush + * @link: ATA link to write SCR for + * @reg: SCR to write + * @val: value to write * - * Test whether SCRs are accessible for @ap. + * This function is identical to sata_scr_write() except that this + * function performs flush after writing to the register. * * LOCKING: - * None. + * None if @link is ap->link. Kernel thread context otherwise. * * RETURNS: - * 1 if SCRs are accessible, 0 otherwise. 
+ * 0 on success, negative errno on failure. */ -int sata_scr_valid(struct ata_port *ap) +int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) { - return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read; + if (ata_is_host_link(link)) { + int rc; + + if (sata_scr_valid(link)) { + rc = link->ap->ops->scr_write(link, reg, val); + if (rc == 0) + rc = link->ap->ops->scr_read(link, reg, &val); + return rc; + } + return -EOPNOTSUPP; + } + + return sata_pmp_scr_write(link, reg, val); } /** - * sata_scr_read - read SCR register of the specified port - * @ap: ATA port to read SCR for - * @reg: SCR to read - * @val: Place to store read value + * ata_phys_link_online - test whether the given link is online + * @link: ATA link to test * - * Read SCR register @reg of @ap into *@val. This function is - * guaranteed to succeed if the cable type of the port is SATA - * and the port implements ->scr_read. + * Test whether @link is online. Note that this function returns + * 0 if online status of @link cannot be obtained, so + * ata_link_online(link) != !ata_link_offline(link). * * LOCKING: * None. * * RETURNS: - * 0 on success, negative errno on failure. + * True if the port online status is available and online. */ -int sata_scr_read(struct ata_port *ap, int reg, u32 *val) +bool ata_phys_link_online(struct ata_link *link) { - if (sata_scr_valid(ap)) { - *val = ap->ops->scr_read(ap, reg); - return 0; - } - return -EOPNOTSUPP; + u32 sstatus; + + if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && + ata_sstatus_online(sstatus)) + return true; + return false; } /** - * sata_scr_write - write SCR register of the specified port - * @ap: ATA port to write SCR for - * @reg: SCR to write - * @val: value to write + * ata_phys_link_offline - test whether the given link is offline + * @link: ATA link to test * - * Write @val to SCR register @reg of @ap. This function is - * guaranteed to succeed if the cable type of the port is SATA - * and the port implements ->scr_read. + * Test whether @link is offline. Note that this function + * returns 0 if offline status of @link cannot be obtained, so + * ata_link_online(link) != !ata_link_offline(link). * * LOCKING: * None. * * RETURNS: - * 0 on success, negative errno on failure. + * True if the port offline status is available and offline. */ -int sata_scr_write(struct ata_port *ap, int reg, u32 val) +bool ata_phys_link_offline(struct ata_link *link) { - if (sata_scr_valid(ap)) { - ap->ops->scr_write(ap, reg, val); - return 0; - } - return -EOPNOTSUPP; + u32 sstatus; + + if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && + !ata_sstatus_online(sstatus)) + return true; + return false; } /** - * sata_scr_write_flush - write SCR register of the specified port and flush - * @ap: ATA port to write SCR for - * @reg: SCR to write - * @val: value to write + * ata_link_online - test whether the given link is online + * @link: ATA link to test * - * This function is identical to sata_scr_write() except that this - * function performs flush after writing to the register. + * Test whether @link is online. This is identical to + * ata_phys_link_online() when there's no slave link. When + * there's a slave link, this function should only be called on + * the master link and will return true if any of M/S links is + * online. * * LOCKING: * None. * * RETURNS: - * 0 on success, negative errno on failure. + * True if the port online status is available and online. 
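+ *
+ * Sketch (editor's illustration) of the SStatus test this helper
+ * builds on; a DET field of 0x3 means a device is present and PHY
+ * communication is established:
+ *
+ *	u32 sstatus;
+ *	bool online;
+ *
+ *	online = sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+ *		 (sstatus & 0xf) == 0x3;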
*/ -int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) +bool ata_link_online(struct ata_link *link) { - if (sata_scr_valid(ap)) { - ap->ops->scr_write(ap, reg, val); - ap->ops->scr_read(ap, reg); - return 0; - } - return -EOPNOTSUPP; + struct ata_link *slave = link->ap->slave_link; + + WARN_ON(link == slave); /* shouldn't be called on slave link */ + + return ata_phys_link_online(link) || + (slave && ata_phys_link_online(slave)); } /** - * ata_port_online - test whether the given port is online - * @ap: ATA port to test + * ata_link_offline - test whether the given link is offline + * @link: ATA link to test * - * Test whether @ap is online. Note that this function returns 0 - * if online status of @ap cannot be obtained, so - * ata_port_online(ap) != !ata_port_offline(ap). + * Test whether @link is offline. This is identical to + * ata_phys_link_offline() when there's no slave link. When + * there's a slave link, this function should only be called on + * the master link and will return true if both M/S links are + * offline. * * LOCKING: * None. * * RETURNS: - * 1 if the port online status is available and online. + * True if the port offline status is available and offline. */ -int ata_port_online(struct ata_port *ap) +bool ata_link_offline(struct ata_link *link) { - u32 sstatus; + struct ata_link *slave = link->ap->slave_link; - if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3) - return 1; - return 0; + WARN_ON(link == slave); /* shouldn't be called on slave link */ + + return ata_phys_link_offline(link) && + (!slave || ata_phys_link_offline(slave)); } -/** - * ata_port_offline - test whether the given port is offline - * @ap: ATA port to test - * - * Test whether @ap is offline. Note that this function returns - * 0 if offline status of @ap cannot be obtained, so - * ata_port_online(ap) != !ata_port_offline(ap). - * - * LOCKING: - * None. +#ifdef CONFIG_PM +static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, + unsigned int action, unsigned int ehi_flags, + bool async) +{ + struct ata_link *link; + unsigned long flags; + + /* Previous resume operation might still be in + * progress. Wait for PM_PENDING to clear. + */ + if (ap->pflags & ATA_PFLAG_PM_PENDING) { + ata_port_wait_eh(ap); + WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); + } + + /* request PM ops to EH */ + spin_lock_irqsave(ap->lock, flags); + + ap->pm_mesg = mesg; + ap->pflags |= ATA_PFLAG_PM_PENDING; + ata_for_each_link(link, ap, HOST_FIRST) { + link->eh_info.action |= action; + link->eh_info.flags |= ehi_flags; + } + + ata_port_schedule_eh(ap); + + spin_unlock_irqrestore(ap->lock, flags); + + if (!async) { + ata_port_wait_eh(ap); + WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); + } +} + +/* + * On some hardware, device fails to respond after spun down for suspend. As + * the device won't be used before being resumed, we don't need to touch the + * device. Ask EH to skip the usual stuff and proceed directly to suspend. * - * RETURNS: - * 1 if the port offline status is available and offline. 
+ * http://thread.gmane.org/gmane.linux.ide/46764 */ -int ata_port_offline(struct ata_port *ap) +static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET + | ATA_EHI_NO_AUTOPSY + | ATA_EHI_NO_RECOVERY; + +static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) { - u32 sstatus; + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); +} - if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3) - return 1; - return 0; +static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) +{ + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); } -int ata_flush_cache(struct ata_device *dev) +static int ata_port_pm_suspend(struct device *dev) { - unsigned int err_mask; - u8 cmd; + struct ata_port *ap = to_ata_port(dev); - if (!ata_try_flush_cache(dev)) + if (pm_runtime_suspended(dev)) return 0; - if (ata_id_has_flush_ext(dev->id)) - cmd = ATA_CMD_FLUSH_EXT; - else - cmd = ATA_CMD_FLUSH; + ata_port_suspend(ap, PMSG_SUSPEND); + return 0; +} - err_mask = ata_do_simple_cmd(dev, cmd); - if (err_mask) { - ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); - return -EIO; - } +static int ata_port_pm_freeze(struct device *dev) +{ + struct ata_port *ap = to_ata_port(dev); + + if (pm_runtime_suspended(dev)) + return 0; + ata_port_suspend(ap, PMSG_FREEZE); return 0; } -static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, - unsigned int action, unsigned int ehi_flags, - int wait) +static int ata_port_pm_poweroff(struct device *dev) { - unsigned long flags; - int i, rc; + ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); + return 0; +} - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; +static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY + | ATA_EHI_QUIET; - /* Previous resume operation might still be in - * progress. Wait for PM_PENDING to clear. - */ - if (ap->pflags & ATA_PFLAG_PM_PENDING) { - ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); - } +static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) +{ + ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); +} - /* request PM ops to EH */ - spin_lock_irqsave(ap->lock, flags); +static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) +{ + ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); +} - ap->pm_mesg = mesg; - if (wait) { - rc = 0; - ap->pm_result = &rc; - } +static int ata_port_pm_resume(struct device *dev) +{ + ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + return 0; +} - ap->pflags |= ATA_PFLAG_PM_PENDING; - ap->eh_info.action |= action; - ap->eh_info.flags |= ehi_flags; +/* + * For ODDs, the upper layer will poll for media change every few seconds, + * which will make it enter and leave suspend state every few seconds. And + * as each suspend will cause a hard/soft reset, the gain of runtime suspend + * is very little and the ODD may malfunction after constantly being reset. + * So the idle callback here will not proceed to suspend if a non-ZPODD capable + * ODD is attached to the port. 
+ */ +static int ata_port_runtime_idle(struct device *dev) +{ + struct ata_port *ap = to_ata_port(dev); + struct ata_link *link; + struct ata_device *adev; - ata_port_schedule_eh(ap); + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(adev, link, ENABLED) + if (adev->class == ATA_DEV_ATAPI && + !zpodd_dev_enabled(adev)) + return -EBUSY; + } - spin_unlock_irqrestore(ap->lock, flags); + return 0; +} - /* wait and check result */ - if (wait) { - ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); - if (rc) - return rc; - } - } +static int ata_port_runtime_suspend(struct device *dev) +{ + ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); + return 0; +} +static int ata_port_runtime_resume(struct device *dev) +{ + ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); return 0; } +static const struct dev_pm_ops ata_port_pm_ops = { + .suspend = ata_port_pm_suspend, + .resume = ata_port_pm_resume, + .freeze = ata_port_pm_freeze, + .thaw = ata_port_pm_resume, + .poweroff = ata_port_pm_poweroff, + .restore = ata_port_pm_resume, + + .runtime_suspend = ata_port_runtime_suspend, + .runtime_resume = ata_port_runtime_resume, + .runtime_idle = ata_port_runtime_idle, +}; + +/* sas ports don't participate in pm runtime management of ata_ports, + * and need to resume ata devices at the domain level, not the per-port + * level. sas suspend/resume is async to allow parallel port recovery + * since sas has multiple ata_port instances per Scsi_Host. + */ +void ata_sas_port_suspend(struct ata_port *ap) +{ + ata_port_suspend_async(ap, PMSG_SUSPEND); +} +EXPORT_SYMBOL_GPL(ata_sas_port_suspend); + +void ata_sas_port_resume(struct ata_port *ap) +{ + ata_port_resume_async(ap, PMSG_RESUME); +} +EXPORT_SYMBOL_GPL(ata_sas_port_resume); + /** * ata_host_suspend - suspend host * @host: host to suspend * @mesg: PM message * - * Suspend @host. Actual operation is performed by EH. This - * function requests EH to perform PM operations and waits for EH - * to finish. - * - * LOCKING: - * Kernel thread context (may sleep). - * - * RETURNS: - * 0 on success, -errno on failure. + * Suspend @host. Actual operation is performed by port suspend. */ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) { - int i, j, rc; - - rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); - if (rc) - goto fail; - - /* EH is quiescent now. Fail if we have any ready device. - * This happens if hotplug occurs between completion of device - * suspension and here. - */ - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - for (j = 0; j < ATA_MAX_DEVICES; j++) { - struct ata_device *dev = &ap->device[j]; - - if (ata_dev_ready(dev)) { - ata_port_printk(ap, KERN_WARNING, - "suspend failed, device %d " - "still active\n", dev->devno); - rc = -EBUSY; - goto fail; - } - } - } - host->dev->power.power_state = mesg; return 0; - - fail: - ata_host_resume(host); - return rc; } /** * ata_host_resume - resume host * @host: host to resume * - * Resume @host. Actual operation is performed by EH. This - * function requests EH to perform PM operations and returns. - * Note that all resume operations are performed parallely. - * - * LOCKING: - * Kernel thread context (may sleep). + * Resume @host. Actual operation is performed by port resume. 
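+ *
+ * Sketch (editor's illustration) of use from a driver's resume
+ * hook, assuming drvdata holds the ata_host:
+ *
+ *	struct ata_host *host = dev_get_drvdata(dev);
+ *
+ *	ata_host_resume(host);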
*/ void ata_host_resume(struct ata_host *host) { - ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, - ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); host->dev->power.power_state = PMSG_ON; } +#endif + +struct device_type ata_port_type = { + .name = "ata_port", +#ifdef CONFIG_PM + .pm = &ata_port_pm_ops, +#endif +}; /** - * ata_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table. + * ata_dev_init - Initialize an ata_device structure + * @dev: Device structure to initialize * - * May be used as the port_start() entry in ata_port_operations. + * Initialize @dev in preparation for probing. * * LOCKING: * Inherited from caller. */ - -int ata_port_start (struct ata_port *ap) +void ata_dev_init(struct ata_device *dev) { - struct device *dev = ap->dev; - int rc; - - ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); - if (!ap->prd) - return -ENOMEM; + struct ata_link *link = ata_dev_phys_link(dev); + struct ata_port *ap = link->ap; + unsigned long flags; - rc = ata_pad_alloc(ap, dev); - if (rc) { - dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); - return rc; - } + /* SATA spd limit is bound to the attached device, reset together */ + link->sata_spd_limit = link->hw_sata_spd_limit; + link->sata_spd = 0; - DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); + /* High bits of dev->flags are used to record warm plug + * requests which occur asynchronously. Synchronize using + * host lock. + */ + spin_lock_irqsave(ap->lock, flags); + dev->flags &= ~ATA_DFLAG_INIT_MASK; + dev->horkage = 0; + spin_unlock_irqrestore(ap->lock, flags); - return 0; + memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, + ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); + dev->pio_mask = UINT_MAX; + dev->mwdma_mask = UINT_MAX; + dev->udma_mask = UINT_MAX; } - /** - * ata_port_stop - Undo ata_port_start() - * @ap: Port to shut down + * ata_link_init - Initialize an ata_link structure + * @ap: ATA port link is attached to + * @link: Link structure to initialize + * @pmp: Port multiplier port number * - * Frees the PRD table. - * - * May be used as the port_stop() entry in ata_port_operations. + * Initialize @link. * * LOCKING: - * Inherited from caller. + * Kernel thread context (may sleep) */ - -void ata_port_stop (struct ata_port *ap) +void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) { - struct device *dev = ap->dev; + int i; - dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); - ata_pad_free(ap, dev); -} + /* clear everything except for devices */ + memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, + ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); -void ata_host_stop (struct ata_host *host) -{ - if (host->mmio_base) - iounmap(host->mmio_base); + link->ap = ap; + link->pmp = pmp; + link->active_tag = ATA_TAG_POISON; + link->hw_sata_spd_limit = UINT_MAX; + + /* can't use iterator, ap isn't initialized yet */ + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &link->device[i]; + + dev->link = link; + dev->devno = dev - link->device; +#ifdef CONFIG_ATA_ACPI + dev->gtf_filter = ata_acpi_gtf_filter; +#endif + ata_dev_init(dev); + } } /** - * ata_dev_init - Initialize an ata_device structure - * @dev: Device structure to initialize + * sata_link_init_spd - Initialize link->sata_spd_limit + * @link: Link to configure sata_spd_limit for * - * Initialize @dev in preparation for probing. 
+ * Initialize @link->[hw_]sata_spd_limit to the currently + * configured value. * * LOCKING: - * Inherited from caller. + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. */ -void ata_dev_init(struct ata_device *dev) +int sata_link_init_spd(struct ata_link *link) { - struct ata_port *ap = dev->ap; - unsigned long flags; + u8 spd; + int rc; - /* SATA spd limit is bound to the first device */ - ap->sata_spd_limit = ap->hw_sata_spd_limit; + rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); + if (rc) + return rc; - /* High bits of dev->flags are used to record warm plug - * requests which occur asynchronously. Synchronize using - * host lock. - */ - spin_lock_irqsave(ap->lock, flags); - dev->flags &= ~ATA_DFLAG_INIT_MASK; - spin_unlock_irqrestore(ap->lock, flags); + spd = (link->saved_scontrol >> 4) & 0xf; + if (spd) + link->hw_sata_spd_limit &= (1 << spd) - 1; - memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, - sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); - dev->pio_mask = UINT_MAX; - dev->mwdma_mask = UINT_MAX; - dev->udma_mask = UINT_MAX; + ata_force_link_limits(link); + + link->sata_spd_limit = link->hw_sata_spd_limit; + + return 0; } /** - * ata_port_init - Initialize an ata_port structure - * @ap: Structure to initialize - * @host: Collection of hosts to which @ap belongs - * @ent: Probe information provided by low-level driver - * @port_no: Port number associated with this ata_port + * ata_port_alloc - allocate and initialize basic ATA port resources + * @host: ATA host this allocated port belongs to + * + * Allocate and initialize basic ATA port resources. * - * Initialize a new ata_port structure. + * RETURNS: + * Allocate ATA port on success, NULL on failure. * * LOCKING: - * Inherited from caller. + * Inherited from calling layer (may sleep). 
*/ -void ata_port_init(struct ata_port *ap, struct ata_host *host, - const struct ata_probe_ent *ent, unsigned int port_no) +struct ata_port *ata_port_alloc(struct ata_host *host) { - unsigned int i; + struct ata_port *ap; + DPRINTK("ENTER\n"); + + ap = kzalloc(sizeof(*ap), GFP_KERNEL); + if (!ap) + return NULL; + + ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; ap->lock = &host->lock; - ap->flags = ATA_FLAG_DISABLED; - ap->id = ata_unique_id++; - ap->ctl = ATA_DEVCTL_OBS; + ap->print_id = -1; + ap->local_port_no = -1; ap->host = host; - ap->dev = ent->dev; - ap->port_no = port_no; - if (port_no == 1 && ent->pinfo2) { - ap->pio_mask = ent->pinfo2->pio_mask; - ap->mwdma_mask = ent->pinfo2->mwdma_mask; - ap->udma_mask = ent->pinfo2->udma_mask; - ap->flags |= ent->pinfo2->flags; - ap->ops = ent->pinfo2->port_ops; - } else { - ap->pio_mask = ent->pio_mask; - ap->mwdma_mask = ent->mwdma_mask; - ap->udma_mask = ent->udma_mask; - ap->flags |= ent->port_flags; - ap->ops = ent->port_ops; - } - ap->hw_sata_spd_limit = UINT_MAX; - ap->active_tag = ATA_TAG_POISON; - ap->last_ctl = 0xFF; + ap->dev = host->dev; #if defined(ATA_VERBOSE_DEBUG) /* turn on all debugging levels */ @@ -5315,541 +5711,693 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host, ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif - INIT_WORK(&ap->port_task, NULL, NULL); - INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); - INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); + mutex_init(&ap->scsi_scan_mutex); + INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); + INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); + init_completion(&ap->park_req_pending); + init_timer_deferrable(&ap->fastdrain_timer); + ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; + ap->fastdrain_timer.data = (unsigned long)ap; - /* set cable type */ ap->cbl = ATA_CBL_NONE; - if (ap->flags & ATA_FLAG_SATA) - ap->cbl = ATA_CBL_SATA; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - dev->ap = ap; - dev->devno = i; - ata_dev_init(dev); - } + ata_link_init(ap, &ap->link, 0); #ifdef ATA_IRQ_TRAP ap->stats.unhandled_irq = 1; ap->stats.idle_irq = 1; #endif + ata_sff_port_init(ap); - memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports)); + return ap; } -/** - * ata_port_init_shost - Initialize SCSI host associated with ATA port - * @ap: ATA port to initialize SCSI host for - * @shost: SCSI host associated with @ap - * - * Initialize SCSI host @shost associated with ATA port @ap. - * - * LOCKING: - * Inherited from caller. 
- */
-static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
+static void ata_host_release(struct device *gendev, void *res)
 {
-	ap->scsi_host = shost;
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (!ap)
+			continue;
+
+		if (ap->scsi_host)
+			scsi_host_put(ap->scsi_host);
+
+		kfree(ap->pmp_link);
+		kfree(ap->slave_link);
+		kfree(ap);
+		host->ports[i] = NULL;
+	}
 
-	shost->unique_id = ap->id;
-	shost->max_id = 16;
-	shost->max_lun = 1;
-	shost->max_channel = 1;
-	shost->max_cmd_len = 12;
+	dev_set_drvdata(gendev, NULL);
 }
 
 /**
- * ata_port_add - Attach low-level ATA driver to system
- * @ent: Information provided by low-level driver
- * @host: Collections of ports to which we add
- * @port_no: Port number associated with this host
+ * ata_host_alloc - allocate and init basic ATA host resources
+ * @dev: generic device this host is associated with
+ * @max_ports: maximum number of ATA ports associated with this host
 *
- * Attach low-level ATA driver to system.
+ * Allocate and initialize basic ATA host resources.  An LLD calls
+ * this function to allocate a host, initializes it fully and then
+ * attaches it using ata_host_register().
 *
- * LOCKING:
- * PCI/etc. bus probe sem.
+ * @max_ports ports are allocated and host->n_ports is
+ * initialized to @max_ports.  The caller is allowed to decrease
+ * host->n_ports before calling ata_host_register().  The unused
+ * ports will be automatically freed on registration.
 *
 * RETURNS:
- * New ata_port on success, for NULL on error.
+ * Allocate ATA host on success, NULL on failure.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
 */
-static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
-				      struct ata_host *host,
-				      unsigned int port_no)
+struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
 {
-	struct Scsi_Host *shost;
-	struct ata_port *ap;
+	struct ata_host *host;
+	size_t sz;
+	int i;
 
 	DPRINTK("ENTER\n");
 
-	if (!ent->port_ops->error_handler &&
-	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
-		printk(KERN_ERR "ata%u: no reset mechanism available\n",
-		       port_no);
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 		return NULL;
-	}
 
-	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
-	if (!shost)
-		return NULL;
+	/* alloc a container for our list of ATA ports (buses) */
+	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
+	if (!host)
+		goto err_out;
 
-	shost->transportt = &ata_scsi_transport_template;
+	devres_add(dev, host);
+	dev_set_drvdata(dev, host);
 
-	ap = ata_shost_to_port(shost);
+	spin_lock_init(&host->lock);
+	mutex_init(&host->eh_mutex);
+	host->dev = dev;
+	host->n_ports = max_ports;
 
-	ata_port_init(ap, host, ent, port_no);
-	ata_port_init_shost(ap, shost);
+	/* allocate ports bound to this host */
+	for (i = 0; i < max_ports; i++) {
+		struct ata_port *ap;
 
-	return ap;
-}
+		ap = ata_port_alloc(host);
+		if (!ap)
+			goto err_out;
 
 /**
- * ata_sas_host_init - Initialize a host struct
- * @host: host to initialize
- * @dev: device host is attached to
- * @flags: host flags
- * @ops: port_ops
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- * - */ + ap->port_no = i; + host->ports[i] = ap; + } -void ata_host_init(struct ata_host *host, struct device *dev, - unsigned long flags, const struct ata_port_operations *ops) -{ - spin_lock_init(&host->lock); - host->dev = dev; - host->flags = flags; - host->ops = ops; + devres_remove_group(dev, NULL); + return host; + + err_out: + devres_release_group(dev, NULL); + return NULL; } /** - * ata_device_add - Register hardware device with ATA and SCSI layers - * @ent: Probe information describing hardware device to be registered + * ata_host_alloc_pinfo - alloc host and init with port_info array + * @dev: generic device this host is associated with + * @ppi: array of ATA port_info to initialize host with + * @n_ports: number of ATA ports attached to this host * - * This function processes the information provided in the probe - * information struct @ent, allocates the necessary ATA and SCSI - * host information structures, initializes them, and registers - * everything with requisite kernel subsystems. + * Allocate ATA host and initialize with info from @ppi. If NULL + * terminated, @ppi may contain fewer entries than @n_ports. The + * last entry will be used for the remaining ports. * - * This function requests irqs, probes the ATA bus, and probes - * the SCSI bus. + * RETURNS: + * Allocate ATA host on success, NULL on failure. * * LOCKING: - * PCI/etc. bus probe sem. - * - * RETURNS: - * Number of ports registered. Zero on error (no ports registered). + * Inherited from calling layer (may sleep). */ -int ata_device_add(const struct ata_probe_ent *ent) +struct ata_host *ata_host_alloc_pinfo(struct device *dev, + const struct ata_port_info * const * ppi, + int n_ports) { - unsigned int i; - struct device *dev = ent->dev; + const struct ata_port_info *pi; struct ata_host *host; - int rc; + int i, j; - DPRINTK("ENTER\n"); - - if (ent->irq == 0) { - dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n"); - return 0; - } - /* alloc a container for our list of ATA ports (buses) */ - host = kzalloc(sizeof(struct ata_host) + - (ent->n_ports * sizeof(void *)), GFP_KERNEL); + host = ata_host_alloc(dev, n_ports); if (!host) - return 0; + return NULL; - ata_host_init(host, dev, ent->_host_flags, ent->port_ops); - host->n_ports = ent->n_ports; - host->irq = ent->irq; - host->irq2 = ent->irq2; - host->mmio_base = ent->mmio_base; - host->private_data = ent->private_data; + for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; - /* register each port bound to this device */ - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - unsigned long xfer_mode_mask; - int irq_line = ent->irq; + if (ppi[j]) + pi = ppi[j++]; - ap = ata_port_add(ent, host, i); - if (!ap) - goto err_out; + ap->pio_mask = pi->pio_mask; + ap->mwdma_mask = pi->mwdma_mask; + ap->udma_mask = pi->udma_mask; + ap->flags |= pi->flags; + ap->link.flags |= pi->link_flags; + ap->ops = pi->port_ops; - host->ports[i] = ap; + if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) + host->ops = pi->port_ops; + } - /* dummy? */ - if (ent->dummy_port_mask & (1 << i)) { - ata_port_printk(ap, KERN_INFO, "DUMMY\n"); - ap->ops = &ata_dummy_port_ops; - continue; - } + return host; +} - /* start port */ - rc = ap->ops->port_start(ap); - if (rc) { - host->ports[i] = NULL; - scsi_host_put(ap->scsi_host); - goto err_out; - } +/** + * ata_slave_link_init - initialize slave link + * @ap: port to initialize slave link for + * + * Create and initialize slave link for @ap. 
This enables slave
+ * link handling on the port.
+ *
+ * In libata, a port contains links and a link contains devices.
+ * There is a single host link but if a PMP is attached to it,
+ * there can be multiple fan-out links.  On SATA, there's usually
+ * a single device connected to a link but PATA and SATA
+ * controllers emulating TF based interface can have two - master
+ * and slave.
+ *
+ * However, there are a few controllers which don't fit into this
+ * abstraction too well - SATA controllers which emulate TF
+ * interface with both master and slave devices but also have
+ * separate SCR register sets for each device.  These controllers
+ * need separate links for physical link handling
+ * (e.g. onlineness, link speed) but should be treated like a
+ * traditional M/S controller for everything else (e.g. command
+ * issue, softreset).
+ *
+ * slave_link is libata's way of handling this class of
+ * controllers without impacting core layer too much.  For
+ * anything other than physical link handling, the default host
+ * link is used for both master and slave.  For physical link
+ * handling, separate @ap->slave_link is used.  All dirty details
+ * are implemented inside libata core layer.  From LLD's POV, the
+ * only difference is that prereset, hardreset and postreset are
+ * called once more for the slave link, so the reset sequence
+ * looks like the following.
+ *
+ * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ * softreset(M) -> postreset(M) -> postreset(S)
+ *
+ * Note that softreset is called only for the master.  Softreset
+ * resets both M/S by definition, so SRST on master should handle
+ * both (the standard method will work just fine).
+ *
+ * LOCKING:
+ * Should be called before host is registered.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+	struct ata_link *link;
 
-	/* Report the secondary IRQ for second channel legacy */
-	if (i == 1 && ent->irq2)
-		irq_line = ent->irq2;
+	WARN_ON(ap->slave_link);
+	WARN_ON(ap->flags & ATA_FLAG_PMP);
 
-	xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
-			(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
-			(ap->pio_mask << ATA_SHIFT_PIO);
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
 
-	/* print per-port info to dmesg */
-	ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
-			"ctl 0x%lX bmdma 0x%lX irq %d\n",
-			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
-			ata_mode_string(xfer_mode_mask),
-			ap->ioaddr.cmd_addr,
-			ap->ioaddr.ctl_addr,
-			ap->ioaddr.bmdma_addr,
-			irq_line);
-
-		ata_chk_status(ap);
-		host->ops->irq_clear(ap);
-		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
-	}
-
-	/* obtain irq, that may be shared between channels */
-	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
-			 DRV_NAME, host);
-	if (rc) {
-		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
-			   ent->irq, rc);
-		goto err_out;
-	}
+	ata_link_init(ap, link, 1);
+	ap->slave_link = link;
+	return 0;
+}
 
-	/* do we have a second IRQ for the other channel, eg legacy mode */
-	if (ent->irq2) {
-		/* We will get weird core code crashes later if this is true
-		   so trap it now */
-		BUG_ON(ent->irq == ent->irq2);
+static void ata_host_stop(struct device *gendev, void *res)
+{
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
 
-		rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
-			 DRV_NAME, host);
-		if (rc) {
-			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
-				   ent->irq2, rc);
-			goto err_out_free_irq;
-		}
-	}
+	WARN_ON(!(host->flags & ATA_HOST_STARTED));
 
-	/* perform each probe synchronously */
-	DPRINTK("probe begin\n");
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-		u32 scontrol;
-		int rc;
 
-		/* init sata_spd_limit to the current value */
-		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
-			int spd = (scontrol >> 4) & 0xf;
-			ap->hw_sata_spd_limit &= (1 << spd) - 1;
-		}
-		ap->sata_spd_limit = ap->hw_sata_spd_limit;
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
 
-		rc = scsi_add_host(ap->scsi_host, dev);
-		if (rc) {
-			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
-			/* FIXME: do something useful here */
-			/* FIXME: handle unconditional calls to
-			 * scsi_scan_host and ata_host_remove, below,
-			 * at the very least
-			 */
-		}
+	if (host->ops->host_stop)
+		host->ops->host_stop(host);
+}
 
-		if (ap->ops->error_handler) {
-			struct ata_eh_info *ehi = &ap->eh_info;
-			unsigned long flags;
+/**
+ * ata_finalize_port_ops - finalize ata_port_operations
+ * @ops: ata_port_operations to finalize
+ *
+ * An ata_port_operations can inherit from another ops and that
+ * ops can again inherit from another.  This can go on as many
+ * times as necessary as long as there is no loop in the
+ * inheritance chain.
+ *
+ * Ops tables are finalized when the host is started.  NULL or
+ * unspecified entries are inherited from the closest ancestor
+ * which has the method and the entry is populated with it.
+ * After finalization, the ops table directly points to all the
+ * methods and ->inherits is no longer necessary and cleared.
+ *
+ * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
+ *
+ * LOCKING:
+ * None.
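+ *
+ * A typical LLD declaration looks like the following (editor's
+ * sketch; the "foo" names are hypothetical):
+ *
+ *	static struct ata_port_operations foo_port_ops = {
+ *		.inherits	= &ata_bmdma_port_ops,
+ *		.set_piomode	= foo_set_piomode,
+ *	};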
+ */
+static void ata_finalize_port_ops(struct ata_port_operations *ops)
+{
+	static DEFINE_SPINLOCK(lock);
+	const struct ata_port_operations *cur;
+	void **begin = (void **)ops;
+	void **end = (void **)&ops->inherits;
+	void **pp;
 
-			ata_port_probe(ap);
+	if (!ops || !ops->inherits)
+		return;
 
-			/* kick EH for boot probing */
-			spin_lock_irqsave(ap->lock, flags);
+	spin_lock(&lock);
 
-			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
-			ehi->action |= ATA_EH_SOFTRESET;
-			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+	for (cur = ops->inherits; cur; cur = cur->inherits) {
+		void **inherit = (void **)cur;
 
-			ap->pflags |= ATA_PFLAG_LOADING;
-			ata_port_schedule_eh(ap);
+		for (pp = begin; pp < end; pp++, inherit++)
+			if (!*pp)
+				*pp = *inherit;
+	}
 
-			spin_unlock_irqrestore(ap->lock, flags);
+	for (pp = begin; pp < end; pp++)
+		if (IS_ERR(*pp))
+			*pp = NULL;
 
-			/* wait for EH to finish */
-			ata_port_wait_eh(ap);
-		} else {
-			DPRINTK("ata%u: bus probe begin\n", ap->id);
-			rc = ata_bus_probe(ap);
-			DPRINTK("ata%u: bus probe end\n", ap->id);
+	ops->inherits = NULL;
 
-			if (rc) {
-				/* FIXME: do something useful here?
-				 * Current libata behavior will
-				 * tear down everything when
-				 * the module is removed
-				 * or the h/w is unplugged.
-				 */
-			}
-		}
-	}
+	spin_unlock(&lock);
+}
+
+/**
+ * ata_host_start - start and freeze ports of an ATA host
+ * @host: ATA host to start ports for
+ *
+ * Start and then freeze ports of @host.  Started status is
+ * recorded in host->flags, so this function can be called
+ * multiple times.  Ports are guaranteed to get started only
+ * once.  If host->ops isn't initialized yet, it's set to the
+ * first non-dummy port ops.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 if all ports are started successfully, -errno otherwise.
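+ *
+ * Sketch (editor's illustration) for LLDs that do not go through
+ * ata_host_activate(); foo_sht is a hypothetical
+ * scsi_host_template:
+ *
+ *	rc = ata_host_start(host);
+ *	if (rc)
+ *		return rc;
+ *	rc = ata_host_register(host, &foo_sht);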
+ */ +int ata_host_start(struct ata_host *host) +{ + int have_stop = 0; + void *start_dr = NULL; + int i, rc; + + if (host->flags & ATA_HOST_STARTED) + return 0; + + ata_finalize_port_ops(host->ops); - /* probes are done, now scan each port's disk(s) */ - DPRINTK("host probe begin\n"); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - ata_scsi_scan_host(ap); + ata_finalize_port_ops(ap->ops); + + if (!host->ops && !ata_port_is_dummy(ap)) + host->ops = ap->ops; + + if (ap->ops->port_stop) + have_stop = 1; } - dev_set_drvdata(dev, host); + if (host->ops->host_stop) + have_stop = 1; - VPRINTK("EXIT, returning %u\n", ent->n_ports); - return ent->n_ports; /* success */ + if (have_stop) { + start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); + if (!start_dr) + return -ENOMEM; + } -err_out_free_irq: - free_irq(ent->irq, host); -err_out: for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - if (ap) { - ap->ops->port_stop(ap); - scsi_host_put(ap->scsi_host); + + if (ap->ops->port_start) { + rc = ap->ops->port_start(ap); + if (rc) { + if (rc != -ENODEV) + dev_err(host->dev, + "failed to start port %d (errno=%d)\n", + i, rc); + goto err_out; + } } + ata_eh_freeze_port(ap); } - kfree(host); - VPRINTK("EXIT, returning 0\n"); + if (start_dr) + devres_add(host->dev, start_dr); + host->flags |= ATA_HOST_STARTED; return 0; + + err_out: + while (--i >= 0) { + struct ata_port *ap = host->ports[i]; + + if (ap->ops->port_stop) + ap->ops->port_stop(ap); + } + devres_free(start_dr); + return rc; } /** - * ata_port_detach - Detach ATA port in prepration of device removal - * @ap: ATA port to be detached - * - * Detach all ATA devices and the associated SCSI devices of @ap; - * then, remove the associated SCSI host. @ap is guaranteed to - * be quiescent on return from this function. + * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas) + * @host: host to initialize + * @dev: device host is attached to + * @ops: port_ops * - * LOCKING: - * Kernel thread context (may sleep). */ -void ata_port_detach(struct ata_port *ap) +void ata_host_init(struct ata_host *host, struct device *dev, + struct ata_port_operations *ops) { - unsigned long flags; - int i; + spin_lock_init(&host->lock); + mutex_init(&host->eh_mutex); + host->n_tags = ATA_MAX_QUEUE - 1; + host->dev = dev; + host->ops = ops; +} - if (!ap->ops->error_handler) - goto skip_eh; +void __ata_port_probe(struct ata_port *ap) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + unsigned long flags; - /* tell EH we're leaving & flush EH */ + /* kick EH for boot probing */ spin_lock_irqsave(ap->lock, flags); - ap->pflags |= ATA_PFLAG_UNLOADING; + + ehi->probe_mask |= ATA_ALL_DEVICES; + ehi->action |= ATA_EH_RESET; + ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; + + ap->pflags &= ~ATA_PFLAG_INITIALIZING; + ap->pflags |= ATA_PFLAG_LOADING; + ata_port_schedule_eh(ap); + spin_unlock_irqrestore(ap->lock, flags); +} - ata_port_wait_eh(ap); +int ata_port_probe(struct ata_port *ap) +{ + int rc = 0; - /* EH is now guaranteed to see UNLOADING, so no new device - * will be attached. Disable all existing devices. 
- */
-	spin_lock_irqsave(ap->lock, flags);
+	if (ap->ops->error_handler) {
+		__ata_port_probe(ap);
+		ata_port_wait_eh(ap);
+	} else {
+		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: bus probe end\n", ap->print_id);
+	}
+	return rc;
+}
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ata_dev_disable(&ap->device[i]);
-
-	spin_unlock_irqrestore(ap->lock, flags);
+static void async_port_probe(void *data, async_cookie_t cookie)
+{
+	struct ata_port *ap = data;
 
-	/* Final freeze & EH.  All in-flight commands are aborted.  EH
-	 * will be skipped and retrials will be terminated with bad
-	 * target.
+	/*
+	 * If we're not allowed to scan this host in parallel,
+	 * we need to wait until all previous scans have completed
+	 * before going further.
+	 * Jeff Garzik says this is only within a controller, so we
+	 * don't need to wait for port 0, only for later ports.
 	 */
-	spin_lock_irqsave(ap->lock, flags);
-	ata_port_freeze(ap);	/* won't be thawed */
-	spin_unlock_irqrestore(ap->lock, flags);
+	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
+		async_synchronize_cookie(cookie);
 
-	ata_port_wait_eh(ap);
+	(void)ata_port_probe(ap);
 
-	/* Flush hotplug task.  The sequence is similar to
-	 * ata_port_flush_task().
-	 */
-	flush_workqueue(ata_aux_wq);
-	cancel_delayed_work(&ap->hotplug_task);
-	flush_workqueue(ata_aux_wq);
+	/* in order to keep device order, we need to synchronize at this point */
+	async_synchronize_cookie(cookie);
 
- skip_eh:
-	/* remove the associated SCSI host */
-	scsi_remove_host(ap->scsi_host);
+	ata_scsi_scan_host(ap, 1);
 }
 
 /**
- * ata_host_remove - PCI layer callback for device removal
- * @host: ATA host set that was removed
+ * ata_host_register - register initialized ATA host
+ * @host: ATA host to register
+ * @sht: template for SCSI host
 *
- * Unregister all objects associated with this host set. Free those
- * objects.
+ * Register initialized ATA host.  @host is allocated using
+ * ata_host_alloc() and fully initialized by LLD.  This function
+ * starts ports, registers @host with ATA and SCSI layers and
+ * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
 */
-
-void ata_host_remove(struct ata_host *host)
+int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
-	unsigned int i;
+	int i, rc;
 
-	for (i = 0; i < host->n_ports; i++)
-		ata_port_detach(host->ports[i]);
+	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
 
-	free_irq(host->irq, host);
-	if (host->irq2)
-		free_irq(host->irq2, host);
+	/* host must have been started */
+	if (!(host->flags & ATA_HOST_STARTED)) {
+		dev_err(host->dev, "BUG: trying to register unstarted host\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/* Blow away unused ports.  This happens when LLD can't
+	 * determine the exact number of ports to allocate at
+	 * allocation time.
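+	 *
+	 * Editor's note: an LLD that discovers fewer ports than it
+	 * allocated simply shrinks the count before registering,
+	 * e.g.
+	 *
+	 *	host->n_ports = nr_found;
+	 *
+	 * (nr_found is hypothetical) and the surplus ports are
+	 * freed here.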
+	 */
+	for (i = host->n_ports; host->ports[i]; i++)
+		kfree(host->ports[i]);
+
+	/* give ports names and add SCSI hosts */
+	for (i = 0; i < host->n_ports; i++) {
+		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+		host->ports[i]->local_port_no = i + 1;
+	}
+
+	/* Create associated sysfs transport objects */
+	for (i = 0; i < host->n_ports; i++) {
+		rc = ata_tport_add(host->dev, host->ports[i]);
+		if (rc) {
+			goto err_tadd;
+		}
+	}
+
+	rc = ata_scsi_add_hosts(host, sht);
+	if (rc)
+		goto err_tadd;
+
+	/* set cable, sata_spd_limit and report */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
+		unsigned long xfer_mask;
 
-		ata_scsi_release(ap->scsi_host);
+		/* set SATA cable type if still unset */
+		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
+			ap->cbl = ATA_CBL_SATA;
 
-		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
-			struct ata_ioports *ioaddr = &ap->ioaddr;
+		/* init sata_spd_limit to the current value */
+		sata_link_init_spd(&ap->link);
+		if (ap->slave_link)
+			sata_link_init_spd(ap->slave_link);
 
-			/* FIXME: Add -ac IDE pci mods to remove these special cases */
-			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
-				release_region(ATA_PRIMARY_CMD, 8);
-			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
-				release_region(ATA_SECONDARY_CMD, 8);
-		}
+		/* print per-port info to dmesg */
+		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
+					      ap->udma_mask);
+
+		if (!ata_port_is_dummy(ap)) {
+			ata_port_info(ap, "%cATA max %s %s\n",
+				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
+				      ata_mode_string(xfer_mask),
+				      ap->link.eh_info.desc);
+			ata_ehi_clear_desc(&ap->link.eh_info);
+		} else
+			ata_port_info(ap, "DUMMY\n");
+	}
 
-		scsi_host_put(ap->scsi_host);
+	/* perform each probe asynchronously */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		async_schedule(async_port_probe, ap);
 	}
 
-	if (host->ops->host_stop)
-		host->ops->host_stop(host);
+	return 0;
+
+ err_tadd:
+	while (--i >= 0) {
+		ata_tport_delete(host->ports[i]);
+	}
+	return rc;
 
-	kfree(host);
 }
 
 /**
- * ata_scsi_release - SCSI layer callback hook for host unload
- * @host: libata host to be unloaded
- *
- * Performs all duties necessary to shut down a libata port...
- * Kill port kthread, disable port, and release resources.
+ * ata_host_activate - start host, request IRQ and register it
+ * @host: target ATA host
+ * @irq: IRQ to request
+ * @irq_handler: irq_handler used when requesting IRQ
+ * @irq_flags: irq_flags used when requesting IRQ
+ * @sht: scsi_host_template to use when registering the host
+ *
+ * After allocating an ATA host and initializing it, most libata
+ * LLDs perform three steps to activate the host - start host,
+ * request IRQ and register it.  This helper takes necessary
+ * arguments and performs the three steps in one go.
+ *
+ * An invalid IRQ skips the IRQ registration and expects the host to
+ * have set polling mode on the port.  In this case, @irq_handler
+ * should be NULL.
 *
 * LOCKING:
- * Inherited from SCSI layer.
+ * Inherited from calling layer (may sleep).
 *
 * RETURNS:
- * One.
+ * 0 on success, -errno otherwise.
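+ *
+ * Typical probe-path call (editor's sketch; the "foo" names and
+ * the ppi array are hypothetical):
+ *
+ *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
+ *	if (!host)
+ *		return -ENOMEM;
+ *	return ata_host_activate(host, pdev->irq, foo_interrupt,
+ *				 IRQF_SHARED, &foo_sht);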
 */
-
-int ata_scsi_release(struct Scsi_Host *shost)
+int ata_host_activate(struct ata_host *host, int irq,
+		      irq_handler_t irq_handler, unsigned long irq_flags,
+		      struct scsi_host_template *sht)
 {
-	struct ata_port *ap = ata_shost_to_port(shost);
+	int i, rc;
 
-	DPRINTK("ENTER\n");
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
 
-	ap->ops->port_disable(ap);
-	ap->ops->port_stop(ap);
+	/* Special case for polling mode */
+	if (!irq) {
+		WARN_ON(irq_handler);
+		return ata_host_register(host, sht);
+	}
 
-	DPRINTK("EXIT\n");
-	return 1;
+	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
+			      dev_driver_string(host->dev), host);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < host->n_ports; i++)
+		ata_port_desc(host->ports[i], "irq %d", irq);
+
+	rc = ata_host_register(host, sht);
+	/* if failed, just free the IRQ and leave ports alone */
+	if (rc)
+		devm_free_irq(host->dev, irq, host);
+
+	return rc;
 }
 
-struct ata_probe_ent *
-ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+/**
+ * ata_port_detach - Detach ATA port in preparation for device removal
+ * @ap: ATA port to be detached
+ *
+ * Detach all ATA devices and the associated SCSI devices of @ap;
+ * then, remove the associated SCSI host.  @ap is guaranteed to
+ * be quiescent on return from this function.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_port_detach(struct ata_port *ap)
 {
-	struct ata_probe_ent *probe_ent;
+	unsigned long flags;
+	struct ata_link *link;
+	struct ata_device *dev;
 
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent) {
-		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
-		       kobject_name(&(dev->kobj)));
-		return NULL;
-	}
+	if (!ap->ops->error_handler)
+		goto skip_eh;
+
+	/* tell EH we're leaving & flush EH */
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags |= ATA_PFLAG_UNLOADING;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
 
-	INIT_LIST_HEAD(&probe_ent->node);
-	probe_ent->dev = dev;
+	/* wait till EH commits suicide */
+	ata_port_wait_eh(ap);
 
-	probe_ent->sht = port->sht;
-	probe_ent->port_flags = port->flags;
-	probe_ent->pio_mask = port->pio_mask;
-	probe_ent->mwdma_mask = port->mwdma_mask;
-	probe_ent->udma_mask = port->udma_mask;
-	probe_ent->port_ops = port->port_ops;
+	/* it better be dead now */
+	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
 
-	return probe_ent;
+	cancel_delayed_work_sync(&ap->hotplug_task);
+
+ skip_eh:
+	/* clean up zpodd on port removal */
+	ata_for_each_link(link, ap, HOST_FIRST) {
+		ata_for_each_dev(dev, link, ALL) {
+			if (zpodd_dev_enabled(dev))
+				zpodd_exit(dev);
+		}
+	}
+	if (ap->pmp_link) {
+		int i;
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_tlink_delete(&ap->pmp_link[i]);
+	}
+	/* remove the associated SCSI host */
+	scsi_remove_host(ap->scsi_host);
+	ata_tport_delete(ap);
 }
 
 /**
- * ata_std_ports - initialize ioaddr with standard port offsets.
- * @ioaddr: IO address structure to be initialized
+ * ata_host_detach - Detach all ports of an ATA host
+ * @host: Host to detach
 *
- * Utility function which initializes data_addr, error_addr,
- * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
- * device_addr, status_addr, and command_addr to standard offsets
- * relative to cmd_addr.
+ * Detach all ports of @host.
 *
- * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ * LOCKING:
+ * Kernel thread context (may sleep).
*/ - -void ata_std_ports(struct ata_ioports *ioaddr) +void ata_host_detach(struct ata_host *host) { - ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; - ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; - ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; - ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; - ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; - ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; - ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; - ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; - ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; - ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; -} - - -#ifdef CONFIG_PCI + int i; -void ata_pci_host_stop (struct ata_host *host) -{ - struct pci_dev *pdev = to_pci_dev(host->dev); + for (i = 0; i < host->n_ports; i++) + ata_port_detach(host->ports[i]); - pci_iounmap(pdev, host->mmio_base); + /* the host is dead now, dissociate ACPI */ + ata_acpi_dissociate(host); } +#ifdef CONFIG_PCI + /** * ata_pci_remove_one - PCI layer callback for device removal * @pdev: PCI device that was removed * - * PCI layer indicates to libata via this hook that - * hot-unplug or module unload event has occurred. - * Handle this by unregistering all objects associated - * with this PCI device. Free those objects. Then finally - * release PCI resources and disable device. + * PCI layer indicates to libata via this hook that hot-unplug or + * module unload event has occurred. Detach all ports. Resource + * release is handled via devres. * * LOCKING: * Inherited from PCI layer (may sleep). */ - -void ata_pci_remove_one (struct pci_dev *pdev) +void ata_pci_remove_one(struct pci_dev *pdev) { - struct device *dev = pci_dev_to_dev(pdev); - struct ata_host *host = dev_get_drvdata(dev); + struct ata_host *host = pci_get_drvdata(pdev); - ata_host_remove(host); - - pci_release_regions(pdev); - pci_disable_device(pdev); - dev_set_drvdata(dev, NULL); + ata_host_detach(host); } /* move to PCI subsystem */ @@ -5886,27 +6434,37 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) return (tmp == bits->val) ? 
1 : 0;
 }
 
+#ifdef CONFIG_PM
 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	pci_save_state(pdev);
+	pci_disable_device(pdev);
 
-	if (mesg.event == PM_EVENT_SUSPEND) {
-		pci_disable_device(pdev);
+	if (mesg.event & PM_EVENT_SLEEP)
 		pci_set_power_state(pdev, PCI_D3hot);
-	}
 }
 
-void ata_pci_device_do_resume(struct pci_dev *pdev)
+int ata_pci_device_do_resume(struct pci_dev *pdev)
 {
+	int rc;
+
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"failed to enable device after resume (%d)\n", rc);
+		return rc;
+	}
+
 	pci_set_master(pdev);
+	return 0;
 }
 
 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
 	int rc = 0;
 
 	rc = ata_host_suspend(host, mesg);
@@ -5920,69 +6478,300 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 
 int ata_pci_device_resume(struct pci_dev *pdev)
 {
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
 
-	ata_pci_device_do_resume(pdev);
-	ata_host_resume(host);
-	return 0;
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc == 0)
+		ata_host_resume(host);
+	return rc;
 }
+#endif /* CONFIG_PM */
+
 #endif /* CONFIG_PCI */
 
+/**
+ * ata_platform_remove_one - Platform layer callback for device removal
+ * @pdev: Platform device that was removed
+ *
+ * Platform layer indicates to libata via this hook that hot-unplug or
+ * module unload event has occurred.  Detach all ports.  Resource
+ * release is handled via devres.
+ *
+ * LOCKING:
+ * Inherited from platform layer (may sleep).
+ */
+int ata_platform_remove_one(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+
+	ata_host_detach(host);
+
+	return 0;
+}
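/*
 * Example (editor's sketch, not part of this patch): these removal and
 * power-management hooks are meant to be wired directly into a driver's
 * pci_driver or platform_driver.  The "foo" names are assumed
 * placeholders:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 *
 *	static struct platform_driver foo_platform_driver = {
 *		.probe	= foo_platform_probe,
 *		.remove	= ata_platform_remove_one,
 *		.driver	= { .name = "foo" },
 *	};
 */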
+static int __init ata_parse_force_one(char **cur,
+				      struct ata_force_ent *force_ent,
+				      const char **reason)
+{
+	/* FIXME: Currently, there's no way to tag init const data and
+	 * using __initdata causes build failure on some versions of
+	 * gcc.  Once __initdataconst is implemented, add const to the
+	 * following structure.
+	 */
+	static struct ata_force_param force_tbl[] __initdata = {
+		{ "40c",	.cbl		= ATA_CBL_PATA40 },
+		{ "80c",	.cbl		= ATA_CBL_PATA80 },
+		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
+		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
+		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
+		{ "sata",	.cbl		= ATA_CBL_SATA },
+		{ "1.5Gbps",	.spd_limit	= 1 },
+		{ "3.0Gbps",	.spd_limit	= 2 },
+		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
+		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
+		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
+		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
+		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
+		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
+		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
+		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
+		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
+		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
+		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
+		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
+		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
+		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
+		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
+		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
+		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
+		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
+		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
+		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
+		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
+	};
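	/*
	 * Example (editor's note, not part of this patch): the table keys
	 * above are the VAL part of the "libata.force=[ID:]VAL[,...]"
	 * boot parameter, where ID is PORT[.DEVICE] following the
	 * "ataP.DD" names libata prints to the console.  For instance:
	 *
	 *	libata.force=1:1.5Gbps		// limit port 1 to 1.5Gbps
	 *	libata.force=80c		// assume 80-wire cable everywhere
	 *	libata.force=1.00:noncq,2:udma4	// per-device and per-port
	 */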
+	char *start = *cur, *p = *cur;
+	char *id, *val, *endp;
+	const struct ata_force_param *match_fp = NULL;
+	int nr_matches = 0, i;
+
+	/* find where this param ends and update *cur */
+	while (*p != '\0' && *p != ',')
+		p++;
+
+	if (*p == '\0')
+		*cur = p;
+	else
+		*cur = p + 1;
+
+	*p = '\0';
+
+	/* parse */
+	p = strchr(start, ':');
+	if (!p) {
+		val = strstrip(start);
+		goto parse_val;
+	}
+	*p = '\0';
+
+	id = strstrip(start);
+	val = strstrip(p + 1);
+
+	/* parse id */
+	p = strchr(id, '.');
+	if (p) {
+		*p++ = '\0';
+		force_ent->device = simple_strtoul(p, &endp, 10);
+		if (p == endp || *endp != '\0') {
+			*reason = "invalid device";
+			return -EINVAL;
+		}
+	}
+
+	force_ent->port = simple_strtoul(id, &endp, 10);
+	if (p == endp || *endp != '\0') {
+		*reason = "invalid port/link";
+		return -EINVAL;
+	}
+
+ parse_val:
+	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
+	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
+		const struct ata_force_param *fp = &force_tbl[i];
+
+		if (strncasecmp(val, fp->name, strlen(val)))
+			continue;
+
+		nr_matches++;
+		match_fp = fp;
+
+		if (strcasecmp(val, fp->name) == 0) {
+			nr_matches = 1;
+			break;
+		}
+	}
+
+	if (!nr_matches) {
+		*reason = "unknown value";
+		return -EINVAL;
+	}
+	if (nr_matches > 1) {
+		*reason = "ambiguous value";
+		return -EINVAL;
+	}
+
+	force_ent->param = *match_fp;
+
+	return 0;
+}
+
+static void __init ata_parse_force_param(void)
+{
+	int idx = 0, size = 1;
+	int last_port = -1, last_device = -1;
+	char *p, *cur, *next;
+
+	/* calculate maximum number of params and allocate force_tbl */
+	for (p = ata_force_param_buf; *p; p++)
+		if (*p == ',')
+			size++;
+
+	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
+	if (!ata_force_tbl) {
+		printk(KERN_WARNING "ata: failed to extend force table, "
+		       "libata.force ignored\n");
+		return;
+	}
+
+	/* parse and populate the table */
+	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
+		const char *reason = "";
+		struct ata_force_ent te = { .port = -1, .device = -1 };
+
+		next = cur;
+		if (ata_parse_force_one(&next, &te, &reason)) {
+			printk(KERN_WARNING "ata: failed to parse force "
+			       "parameter \"%s\" (%s)\n",
+			       cur, reason);
+			continue;
+		}
+
+		if (te.port == -1) {
+			te.port = last_port;
+			te.device = last_device;
+		}
+
+		ata_force_tbl[idx++] = te;
+
+		last_port = te.port;
+		last_device = te.device;
+	}
+
+	ata_force_tbl_size = idx;
+}
 
 static int __init ata_init(void)
 {
-	ata_probe_timeout *= HZ;
-	ata_wq = create_workqueue("ata");
-	if (!ata_wq)
-		return -ENOMEM;
+	int rc;
 
-	ata_aux_wq = create_singlethread_workqueue("ata_aux");
-	if (!ata_aux_wq) {
-		destroy_workqueue(ata_wq);
-		return -ENOMEM;
+	ata_parse_force_param();
+
+	rc = ata_sff_init();
+	if (rc) {
+		kfree(ata_force_tbl);
+		return rc;
+	}
+
+	libata_transport_init();
+	ata_scsi_transport_template = ata_attach_transport();
+	if (!ata_scsi_transport_template) {
+		ata_sff_exit();
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
 
 	return 0;
+
+err_out:
+	return rc;
 }
 
 static void __exit ata_exit(void)
 {
-	destroy_workqueue(ata_wq);
-	destroy_workqueue(ata_aux_wq);
+	ata_release_transport(ata_scsi_transport_template);
+	libata_transport_exit();
+	ata_sff_exit();
+	kfree(ata_force_tbl);
 }
 
-module_init(ata_init);
+subsys_initcall(ata_init);
 module_exit(ata_exit);
 
-static unsigned long ratelimit_time;
-static DEFINE_SPINLOCK(ata_ratelimit_lock);
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
 
 int ata_ratelimit(void)
 {
-	int rc;
-	unsigned long flags;
+	return __ratelimit(&ratelimit);
+}
 
-	spin_lock_irqsave(&ata_ratelimit_lock, flags);
+/**
+ * ata_msleep - ATA EH owner aware msleep
+ * @ap: ATA port to attribute the sleep to
+ * @msecs: duration to sleep in milliseconds
+ *
+ * Sleeps @msecs.  If the current task is owner of @ap's EH, the
+ * ownership is released before going to sleep and reacquired
+ * after the sleep is complete.  IOW, other ports sharing the
+ * @ap->host will be allowed to own the EH while this task is
+ * sleeping.
+ *
+ * LOCKING:
+ * Might sleep.
+ */
+void ata_msleep(struct ata_port *ap, unsigned int msecs)
+{
+	bool owns_eh = ap && ap->host->eh_owner == current;
 
-	if (time_after(jiffies, ratelimit_time)) {
-		rc = 1;
-		ratelimit_time = jiffies + (HZ/5);
-	} else
-		rc = 0;
+	if (owns_eh)
+		ata_eh_release(ap);
 
-	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+	msleep(msecs);
 
-	return rc;
+	if (owns_eh)
+		ata_eh_acquire(ap);
 }
 
 /**
 * ata_wait_register - wait until register value changes
+ * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
- * @interval_msec: polling interval in milliseconds
- * @timeout_msec: timeout in milliseconds
+ * @interval: polling interval in milliseconds
+ * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
@@ -5999,11 +6788,10 @@ int ata_ratelimit(void)
 * RETURNS:
 * The final register value.
 */
-u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
-		      unsigned long interval_msec,
-		      unsigned long timeout_msec)
+u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
+		      unsigned long interval, unsigned long timeout)
 {
-	unsigned long timeout;
+	unsigned long deadline;
 	u32 tmp;
 
 	tmp = ioread32(reg);
@@ -6012,10 +6800,10 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 	 * preceding writes reach the controller before starting to
 	 * eat away the timeout.
 	 */
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
+	deadline = ata_deadline(jiffies, timeout);
 
-	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
-		msleep(interval_msec);
+	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
+		ata_msleep(ap, interval);
 		tmp = ioread32(reg);
 	}
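/*
 * Example (editor's sketch, not part of this patch): polling a
 * controller register with the reworked helper.  FOO_REG_STATUS and
 * FOO_BUSY are assumed names; note that passing @ap lets the helper
 * drop EH ownership while it sleeps, so sibling ports on the same
 * host can run their own EH in the meantime:
 *
 *	u32 status;
 *
 *	// poll every 10ms, time out after 1s
 *	status = ata_wait_register(ap, mmio + FOO_REG_STATUS,
 *				   FOO_BUSY, FOO_BUSY, 10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;	// still busy, give up
 */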
@@ -6025,152 +6813,221 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 
 /*
 * Dummy port_ops
 */
-static void ata_dummy_noret(struct ata_port *ap) { }
-static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
-static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
-
-static u8 ata_dummy_check_status(struct ata_port *ap)
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
 {
-	return ATA_DRDY;
+	return AC_ERR_SYSTEM;
 }
 
-static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+static void ata_dummy_error_handler(struct ata_port *ap)
 {
-	return AC_ERR_SYSTEM;
+	/* truly dummy */
 }
 
-const struct ata_port_operations ata_dummy_port_ops = {
-	.port_disable		= ata_port_disable,
-	.check_status		= ata_dummy_check_status,
-	.check_altstatus	= ata_dummy_check_status,
-	.dev_select		= ata_noop_dev_select,
+struct ata_port_operations ata_dummy_port_ops = {
 	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= ata_dummy_qc_issue,
-	.freeze			= ata_dummy_noret,
-	.thaw			= ata_dummy_noret,
-	.error_handler		= ata_dummy_noret,
-	.post_internal_cmd	= ata_dummy_qc_noret,
-	.irq_clear		= ata_dummy_noret,
-	.port_start		= ata_dummy_ret0,
-	.port_stop		= ata_dummy_noret,
+	.error_handler		= ata_dummy_error_handler,
+	.sched_eh		= ata_std_sched_eh,
+	.end_eh			= ata_std_end_eh,
 };
 
+const struct ata_port_info ata_dummy_port_info = {
+	.port_ops		= &ata_dummy_port_ops,
+};
+
+/*
+ * Utility print functions
+ */
+int ata_port_printk(const struct ata_port *ap, const char *level,
+		    const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(ata_port_printk);
+
+int ata_link_printk(const struct ata_link *link, const char *level,
+		    const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
+		r = printk("%sata%u.%02u: %pV",
+			   level, link->ap->print_id, link->pmp, &vaf);
+	else
+		r = printk("%sata%u: %pV",
+			   level, link->ap->print_id, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(ata_link_printk);
+
+int ata_dev_printk(const struct ata_device *dev, const char *level,
+		   const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	r = printk("%sata%u.%02u: %pV",
+		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
+		   &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(ata_dev_printk);
+
+void ata_print_version(const struct device *dev, const char *version)
+{
+	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
+}
+EXPORT_SYMBOL(ata_print_version);
+
 /*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
-
 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_base_port_ops);
+EXPORT_SYMBOL_GPL(sata_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
+EXPORT_SYMBOL_GPL(ata_link_next);
+EXPORT_SYMBOL_GPL(ata_dev_next);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
-EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
 EXPORT_SYMBOL_GPL(ata_host_init);
-EXPORT_SYMBOL_GPL(ata_device_add);
-EXPORT_SYMBOL_GPL(ata_port_detach);
-EXPORT_SYMBOL_GPL(ata_host_remove);
+EXPORT_SYMBOL_GPL(ata_host_alloc);
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
+EXPORT_SYMBOL_GPL(ata_host_start);
+EXPORT_SYMBOL_GPL(ata_host_register);
+EXPORT_SYMBOL_GPL(ata_host_activate);
+EXPORT_SYMBOL_GPL(ata_host_detach);
 EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_sg_init_one);
-EXPORT_SYMBOL_GPL(ata_hsm_move);
 EXPORT_SYMBOL_GPL(ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_tf_load);
-EXPORT_SYMBOL_GPL(ata_tf_read);
-EXPORT_SYMBOL_GPL(ata_noop_dev_select);
-EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-EXPORT_SYMBOL_GPL(ata_check_status);
-EXPORT_SYMBOL_GPL(ata_altstatus);
-EXPORT_SYMBOL_GPL(ata_exec_command);
-EXPORT_SYMBOL_GPL(ata_port_start);
-EXPORT_SYMBOL_GPL(ata_port_stop);
-EXPORT_SYMBOL_GPL(ata_host_stop);
-EXPORT_SYMBOL_GPL(ata_interrupt);
-EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
-EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
-EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
-EXPORT_SYMBOL_GPL(ata_qc_prep);
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
+EXPORT_SYMBOL_GPL(ata_mode_string);
+EXPORT_SYMBOL_GPL(ata_id_xfermask); +EXPORT_SYMBOL_GPL(ata_do_set_mode); +EXPORT_SYMBOL_GPL(ata_std_qc_defer); EXPORT_SYMBOL_GPL(ata_noop_qc_prep); -EXPORT_SYMBOL_GPL(ata_bmdma_setup); -EXPORT_SYMBOL_GPL(ata_bmdma_start); -EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); -EXPORT_SYMBOL_GPL(ata_bmdma_status); -EXPORT_SYMBOL_GPL(ata_bmdma_stop); -EXPORT_SYMBOL_GPL(ata_bmdma_freeze); -EXPORT_SYMBOL_GPL(ata_bmdma_thaw); -EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); -EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); -EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); -EXPORT_SYMBOL_GPL(ata_port_probe); +EXPORT_SYMBOL_GPL(ata_dev_disable); EXPORT_SYMBOL_GPL(sata_set_spd); -EXPORT_SYMBOL_GPL(sata_phy_debounce); -EXPORT_SYMBOL_GPL(sata_phy_resume); -EXPORT_SYMBOL_GPL(sata_phy_reset); -EXPORT_SYMBOL_GPL(__sata_phy_reset); -EXPORT_SYMBOL_GPL(ata_bus_reset); +EXPORT_SYMBOL_GPL(ata_wait_after_reset); +EXPORT_SYMBOL_GPL(sata_link_debounce); +EXPORT_SYMBOL_GPL(sata_link_resume); +EXPORT_SYMBOL_GPL(sata_link_scr_lpm); EXPORT_SYMBOL_GPL(ata_std_prereset); -EXPORT_SYMBOL_GPL(ata_std_softreset); +EXPORT_SYMBOL_GPL(sata_link_hardreset); EXPORT_SYMBOL_GPL(sata_std_hardreset); EXPORT_SYMBOL_GPL(ata_std_postreset); -EXPORT_SYMBOL_GPL(ata_dev_revalidate); EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_dev_pair); -EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); +EXPORT_SYMBOL_GPL(ata_msleep); EXPORT_SYMBOL_GPL(ata_wait_register); -EXPORT_SYMBOL_GPL(ata_busy_sleep); -EXPORT_SYMBOL_GPL(ata_port_queue_task); -EXPORT_SYMBOL_GPL(ata_scsi_ioctl); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_slave_config); EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); -EXPORT_SYMBOL_GPL(ata_scsi_release); -EXPORT_SYMBOL_GPL(ata_host_intr); +EXPORT_SYMBOL_GPL(__ata_change_queue_depth); EXPORT_SYMBOL_GPL(sata_scr_valid); EXPORT_SYMBOL_GPL(sata_scr_read); EXPORT_SYMBOL_GPL(sata_scr_write); EXPORT_SYMBOL_GPL(sata_scr_write_flush); -EXPORT_SYMBOL_GPL(ata_port_online); -EXPORT_SYMBOL_GPL(ata_port_offline); +EXPORT_SYMBOL_GPL(ata_link_online); +EXPORT_SYMBOL_GPL(ata_link_offline); +#ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_host_suspend); EXPORT_SYMBOL_GPL(ata_host_resume); +#endif /* CONFIG_PM */ EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); +EXPORT_SYMBOL_GPL(ata_do_dev_read_id); EXPORT_SYMBOL_GPL(ata_scsi_simulate); EXPORT_SYMBOL_GPL(ata_pio_need_iordy); +EXPORT_SYMBOL_GPL(ata_timing_find_mode); EXPORT_SYMBOL_GPL(ata_timing_compute); EXPORT_SYMBOL_GPL(ata_timing_merge); +EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); -EXPORT_SYMBOL_GPL(ata_pci_host_stop); -EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); -EXPORT_SYMBOL_GPL(ata_pci_init_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); +#ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); EXPORT_SYMBOL_GPL(ata_pci_device_suspend); EXPORT_SYMBOL_GPL(ata_pci_device_resume); -EXPORT_SYMBOL_GPL(ata_pci_default_filter); -EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); +#endif /* CONFIG_PM */ #endif /* CONFIG_PCI */ -EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); -EXPORT_SYMBOL_GPL(ata_scsi_device_resume); +EXPORT_SYMBOL_GPL(ata_platform_remove_one); -EXPORT_SYMBOL_GPL(ata_eng_timeout); +EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); +EXPORT_SYMBOL_GPL(ata_ehi_push_desc); +EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); +EXPORT_SYMBOL_GPL(ata_port_desc); +#ifdef CONFIG_PCI +EXPORT_SYMBOL_GPL(ata_port_pbar_desc); +#endif /* 
CONFIG_PCI */ EXPORT_SYMBOL_GPL(ata_port_schedule_eh); +EXPORT_SYMBOL_GPL(ata_link_abort); EXPORT_SYMBOL_GPL(ata_port_abort); EXPORT_SYMBOL_GPL(ata_port_freeze); +EXPORT_SYMBOL_GPL(sata_async_notification); EXPORT_SYMBOL_GPL(ata_eh_freeze_port); EXPORT_SYMBOL_GPL(ata_eh_thaw_port); EXPORT_SYMBOL_GPL(ata_eh_qc_complete); EXPORT_SYMBOL_GPL(ata_eh_qc_retry); +EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); EXPORT_SYMBOL_GPL(ata_do_eh); +EXPORT_SYMBOL_GPL(ata_std_error_handler); + +EXPORT_SYMBOL_GPL(ata_cable_40wire); +EXPORT_SYMBOL_GPL(ata_cable_80wire); +EXPORT_SYMBOL_GPL(ata_cable_unknown); +EXPORT_SYMBOL_GPL(ata_cable_ignore); +EXPORT_SYMBOL_GPL(ata_cable_sata); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 02b2b2787d9..dad83df555c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1,7 +1,7 @@ /* * libata-eh.c - libata error handling * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -33,23 +33,348 @@ */ #include <linux/kernel.h> +#include <linux/blkdev.h> +#include <linux/export.h> +#include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> +#include <scsi/scsi_dbg.h> #include "../scsi/scsi_transport_api.h" #include <linux/libata.h> #include "libata.h" +enum { + /* speed down verdicts */ + ATA_EH_SPDN_NCQ_OFF = (1 << 0), + ATA_EH_SPDN_SPEED_DOWN = (1 << 1), + ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), + ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), + + /* error flags */ + ATA_EFLAG_IS_IO = (1 << 0), + ATA_EFLAG_DUBIOUS_XFER = (1 << 1), + ATA_EFLAG_OLD_ER = (1 << 31), + + /* error categories */ + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + + ATA_EH_CMD_DFL_TIMEOUT = 5000, + + /* always put at least this amount of time between resets */ + ATA_EH_RESET_COOL_DOWN = 5000, + + /* Waiting in ->prereset can never be reliable. It's + * sometimes nice to wait there but it can't be depended upon; + * otherwise, we wouldn't be resetting. Just give it enough + * time for most drives to spin up. + */ + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + + ATA_EH_UA_TRIES = 5, + + /* probe speed down parameters, see ata_eh_schedule_probe() */ + ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ + ATA_EH_PROBE_TRIALS = 2, +}; + +/* The following table determines how we sequence resets. Each entry + * represents timeout for that try. The first try can be soft or + * hardreset. All others are hardreset if available. In most cases + * the first reset w/ 10sec timeout should succeed. Following entries + * are mostly for error handling, hotplug and those outlier devices that + * take an exceptionally long time to recover from reset. 
+ */ +static const unsigned long ata_eh_reset_timeouts[] = { + 10000, /* most drives spin up by 10sec */ + 10000, /* > 99% working drives spin up before 20sec */ + 35000, /* give > 30 secs of idleness for outlier devices */ + 5000, /* and sweet one last chance */ + ULONG_MAX, /* > 1 min has elapsed, give up */ +}; + +static const unsigned long ata_eh_identify_timeouts[] = { + 5000, /* covers > 99% of successes and not too boring on failures */ + 10000, /* combined time till here is enough even for media access */ + 30000, /* for true idiots */ + ULONG_MAX, +}; + +static const unsigned long ata_eh_flush_timeouts[] = { + 15000, /* be generous with flush */ + 15000, /* ditto */ + 30000, /* and even more generous */ + ULONG_MAX, +}; + +static const unsigned long ata_eh_other_timeouts[] = { + 5000, /* same rationale as identify timeout */ + 10000, /* ditto */ + /* but no merciful 30sec for other commands, it just isn't worth it */ + ULONG_MAX, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned long *timeouts; +}; + +/* The following table determines timeouts to use for EH internal + * commands. Each table entry is a command class and matches the + * commands the entry applies to and the timeout table to use. + * + * On the retry after a command timed out, the next timeout value from + * the table is used. If the table doesn't contain further entries, + * the last value is used. + * + * ehc->cmd_timeout_idx keeps track of which timeout to use per + * command class, so if SET_FEATURES times out on the first try, the + * next try will use the second timeout value only for that class. + */ +#define CMDS(cmds...) (const u8 []){ cmds, 0 } +static const struct ata_eh_cmd_timeout_ent +ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { + { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), + .timeouts = ata_eh_identify_timeouts, }, + { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_SET_FEATURES), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), + .timeouts = ata_eh_flush_timeouts }, +}; +#undef CMDS + static void __ata_port_freeze(struct ata_port *ap); -static void ata_eh_finish(struct ata_port *ap); +#ifdef CONFIG_PM static void ata_eh_handle_port_suspend(struct ata_port *ap); static void ata_eh_handle_port_resume(struct ata_port *ap); +#else /* CONFIG_PM */ +static void ata_eh_handle_port_suspend(struct ata_port *ap) +{ } + +static void ata_eh_handle_port_resume(struct ata_port *ap) +{ } +#endif /* CONFIG_PM */ -static void ata_ering_record(struct ata_ering *ering, int is_io, +static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, + va_list args) +{ + ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, + ATA_EH_DESC_LEN - ehi->desc_len, + fmt, args); +} + +/** + * __ata_ehi_push_desc - push error description without adding separator + * @ehi: target EHI + * @fmt: printf format string + * + * Format string according to @fmt and append it to @ehi->desc. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ * ata_ehi_push_desc - push error description with separator
+ * @ehi: target EHI
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to @ehi->desc.
+ * If @ehi->desc is not empty, ", " is added in-between.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	if (ehi->desc_len)
+		__ata_ehi_push_desc(ehi, ", ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ * ata_ehi_clear_desc - clear error description
+ * @ehi: target EHI
+ *
+ * Clear @ehi->desc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+	ehi->desc[0] = '\0';
+	ehi->desc_len = 0;
+}
+
+/**
+ * ata_port_desc - append port description
+ * @ap: target ATA port
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to port
+ * description.  If port description is not empty, " " is added
+ * in-between.  This function is to be used while initializing
+ * ata_host.  The description is printed on host registration.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
+{
+	va_list args;
+
+	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
+
+	if (ap->link.eh_info.desc_len)
+		__ata_ehi_push_desc(&ap->link.eh_info, " ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
+	va_end(args);
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_port_pbar_desc - append PCI BAR description
+ * @ap: target ATA port
+ * @bar: target PCI BAR
+ * @offset: offset into PCI BAR
+ * @name: name of the area
+ *
+ * If @offset is negative, this function formats a string which
+ * contains the name, address, size and type of the BAR and
+ * appends it to the port description.  If @offset is zero or
+ * positive, only the name and the address at @offset are appended.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+			const char *name)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	char *type = "";
+	unsigned long long start, len;
+
+	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+		type = "m";
+	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+		type = "i";
+
+	start = (unsigned long long)pci_resource_start(pdev, bar);
+	len = (unsigned long long)pci_resource_len(pdev, bar);
+
+	if (offset < 0)
+		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
+	else
+		ata_port_desc(ap, "%s 0x%llx", name,
+			      start + (unsigned long long)offset);
+}
+
+#endif /* CONFIG_PCI */
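/*
 * Example (editor's sketch, not part of this patch): how an LLD
 * typically uses the description helpers.  Resource descriptions are
 * pushed during init and show up in the registration printout; error
 * descriptions are pushed from interrupt context and printed by EH.
 * FOO_MMIO_BAR and irq_stat are assumed names:
 *
 *	// during port setup, before ata_host_register()
 *	ata_port_pbar_desc(ap, FOO_MMIO_BAR, -1, "mmio");
 *	ata_port_desc(ap, "irq %d", pdev->irq);
 *
 *	// in the interrupt handler, under ap->lock
 *	ata_ehi_push_desc(&ap->link.eh_info, "irq_stat 0x%08x", irq_stat);
 *	ata_port_freeze(ap);
 */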
+static int ata_lookup_timeout_table(u8 cmd)
+{
+	int i;
+
+	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
+		const u8 *cur;
+
+		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
+			if (*cur == cmd)
+				return i;
+	}
+
+	return -1;
+}
+
+/**
+ * ata_internal_cmd_timeout - determine timeout for an internal command
+ * @dev: target device
+ * @cmd: internal command to be issued
+ *
+ * Determine timeout for internal command @cmd for @dev.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Determined timeout.
+ */
+unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return ATA_EH_CMD_DFL_TIMEOUT;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
+}
+
+/**
+ * ata_internal_cmd_timed_out - notification for internal command timeout
+ * @dev: target device
+ * @cmd: internal command which timed out
+ *
+ * Notify EH that internal command @cmd for @dev timed out.  This
+ * function should be called only for commands whose timeouts are
+ * determined using ata_internal_cmd_timeout().
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
+		ehc->cmd_timeout_idx[dev->devno][ent]++;
+}
+
+static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
 			     unsigned int err_mask)
 {
 	struct ata_ering_entry *ent;
@@ -60,22 +385,23 @@ static void ata_ering_record(struct ata_ering *ering, int is_io,
 	ering->cursor %= ATA_ERING_SIZE;
 	ent = &ering->ring[ering->cursor];
 
-	ent->is_io = is_io;
+	ent->eflags = eflags;
 	ent->err_mask = err_mask;
 	ent->timestamp = get_jiffies_64();
 }
 
-static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
+static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
 {
 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
-	if (!ent->err_mask)
-		return NULL;
-	return ent;
+
+	if (ent->err_mask)
+		return ent;
+	return NULL;
 }
 
-static int ata_ering_map(struct ata_ering *ering,
-			 int (*map_fn)(struct ata_ering_entry *, void *),
-			 void *arg)
+int ata_ering_map(struct ata_ering *ering,
+		  int (*map_fn)(struct ata_ering_entry *, void *),
+		  void *arg)
 {
 	int idx, rc = 0;
 	struct ata_ering_entry *ent;
@@ -94,30 +420,42 @@ static int ata_ering_map(struct ata_ering *ering,
 	return rc;
 }
 
+static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	ent->eflags |= ATA_EFLAG_OLD_ER;
+	return 0;
+}
+
+static void ata_ering_clear(struct ata_ering *ering)
+{
+	ata_ering_map(ering, ata_ering_clear_cb, NULL);
+}
+
 static unsigned int ata_eh_dev_action(struct ata_device *dev)
 {
-	struct ata_eh_context *ehc = &dev->ap->eh_context;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
 
 	return ehc->i.action | ehc->i.dev_action[dev->devno];
 }
 
-static void ata_eh_clear_action(struct ata_device *dev,
+static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 				struct ata_eh_info *ehi, unsigned int action)
 {
-	int i;
+	struct ata_device *tdev;
 
 	if (!dev) {
 		ehi->action &= ~action;
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			ehi->dev_action[i] &= ~action;
+		ata_for_each_dev(tdev, link, ALL)
+			ehi->dev_action[tdev->devno] &= ~action;
 	} else {
 		/* doesn't make sense for port-wide EH actions */
 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
 
 		/* break ehi->action into ehi->dev_action */
 		if (ehi->action & action) {
-			for (i = 0; i < ATA_MAX_DEVICES; i++)
-				ehi->dev_action[i] |= ehi->action & action;
+			ata_for_each_dev(tdev, link, ALL)
+				ehi->dev_action[tdev->devno] |=
+					ehi->action & action;
 			ehi->action &= ~action;
 		}
 
@@ -127,6 +465,41 @@ static void ata_eh_clear_action(struct ata_device *dev,
 }
 
 /**
+ * ata_eh_acquire - acquire EH ownership
+ * @ap: ATA port to
acquire EH ownership for
+ *
+ * Acquire EH ownership for @ap.  This is the basic exclusion
+ * mechanism for ports sharing a host.  Only one port hanging off
+ * the same host can claim the ownership of EH.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_eh_acquire(struct ata_port *ap)
+{
+	mutex_lock(&ap->host->eh_mutex);
+	WARN_ON_ONCE(ap->host->eh_owner);
+	ap->host->eh_owner = current;
+}
+
+/**
+ * ata_eh_release - release EH ownership
+ * @ap: ATA port to release EH ownership for
+ *
+ * Release EH ownership for @ap.  The caller must have acquired
+ * EH ownership using ata_eh_acquire() previously.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_eh_release(struct ata_port *ap)
+{
+	WARN_ON_ONCE(ap->host->eh_owner != current);
+	ap->host->eh_owner = NULL;
+	mutex_unlock(&ap->host->eh_mutex);
+}
+
+/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
@@ -145,29 +518,29 @@ static void ata_eh_clear_action(struct ata_device *dev,
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct ata_port *ap = ata_shost_to_port(host);
 	unsigned long flags;
 	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret;
+	enum blk_eh_timer_return ret;
 
 	DPRINTK("ENTER\n");
 
 	if (ap->ops->error_handler) {
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 		goto out;
 	}
 
-	ret = EH_HANDLED;
+	ret = BLK_EH_HANDLED;
 	spin_lock_irqsave(ap->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 
@@ -176,6 +549,31 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 	return ret;
 }
 
+static void ata_eh_unload(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/* Restore SControl IPM and SPD for the next driver and
+	 * disable attached devices.
+	 */
+	ata_for_each_link(link, ap, PMP_FIRST) {
+		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
+		ata_for_each_dev(dev, link, ALL)
+			ata_dev_disable(dev);
+	}
+
+	/* freeze and set UNLOADED */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_port_freeze(ap);	/* won't be thawed */
+	ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
+	ap->pflags |= ATA_PFLAG_UNLOADED;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
 /**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
@@ -191,19 +589,51 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
-	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
 	unsigned long flags;
+	LIST_HEAD(eh_work_q);
 
 	DPRINTK("ENTER\n");
 
-	/* synchronize with port task */
-	ata_port_flush_task(ap);
+	spin_lock_irqsave(host->host_lock, flags);
+	list_splice_init(&host->eh_cmd_q, &eh_work_q);
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
+
+	/* If we timed out while racing normal completion and there is
+	   nothing to recover (nr_timedout == 0), why exactly are we
+	   doing error recovery here?
*/ + ata_scsi_port_error_handler(host, ap); + + /* finish or retry handled scmd's and clean up */ + WARN_ON(host->host_failed || !list_empty(&eh_work_q)); + + DPRINTK("EXIT\n"); +} + +/** + * ata_scsi_cmd_error_handler - error callback for a list of commands + * @host: scsi host containing the port + * @ap: ATA port within the host + * @eh_work_q: list of commands to process + * + * process the given list of commands and return those finished to the + * ap->eh_done_q. This function is the first part of the libata error + * handler which processes a given list of failed commands. + */ +void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, + struct list_head *eh_work_q) +{ + int i; + unsigned long flags; + + /* make sure sff pio task is not running */ + ata_sff_flush_pio_task(ap); /* synchronize with host lock and sort out timeouts */ /* For new EH, all qcs are finished in one of three ways - * normal completion, error completion, and SCSI timeout. - * Both cmpletions can race against SCSI timeout. When normal + * Both completions can race against SCSI timeout. When normal * completion wins, the qc never reaches EH. When error * completion wins, the qc has ATA_QCFLAG_FAILED set. * @@ -219,7 +649,19 @@ void ata_scsi_error(struct Scsi_Host *host) spin_lock_irqsave(ap->lock, flags); - list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { + /* This must occur under the ap->lock as we don't want + a polled recovery to race the real interrupt handler + + The lost_interrupt handler checks for any completed but + non-notified command and completes much like an IRQ handler. + + We then fall into the error recovery code which will treat + this as if normal completion won the race */ + + if (ap->ops->lost_interrupt) + ap->ops->lost_interrupt(ap); + + list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; for (i = 0; i < ATA_MAX_QUEUE; i++) { @@ -257,73 +699,115 @@ void ata_scsi_error(struct Scsi_Host *host) __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); + + /* initialize eh_tries */ + ap->eh_tries = ATA_EH_MAX_TRIES; } else spin_unlock_wait(ap->lock); - repeat: +} +EXPORT_SYMBOL(ata_scsi_cmd_error_handler); + +/** + * ata_scsi_port_error_handler - recover the port after the commands + * @host: SCSI host containing the port + * @ap: the ATA port + * + * Handle the recovery of the port @ap after all the commands + * have been recovered. 
+ */ +void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) +{ + unsigned long flags; + /* invoke error handler */ if (ap->ops->error_handler) { + struct ata_link *link; + + /* acquire EH ownership */ + ata_eh_acquire(ap); + repeat: + /* kill fast drain timer */ + del_timer_sync(&ap->fastdrain_timer); + /* process port resume request */ ata_eh_handle_port_resume(ap); /* fetch & clear EH info */ spin_lock_irqsave(ap->lock, flags); - memset(&ap->eh_context, 0, sizeof(ap->eh_context)); - ap->eh_context.i = ap->eh_info; - memset(&ap->eh_info, 0, sizeof(ap->eh_info)); + ata_for_each_link(link, ap, HOST_FIRST) { + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev; + + memset(&link->eh_context, 0, sizeof(link->eh_context)); + link->eh_context.i = link->eh_info; + memset(&link->eh_info, 0, sizeof(link->eh_info)); + + ata_for_each_dev(dev, link, ENABLED) { + int devno = dev->devno; + + ehc->saved_xfer_mode[devno] = dev->xfer_mode; + if (ata_ncq_enabled(dev)) + ehc->saved_ncq_enabled |= 1 << devno; + } + } ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; ap->pflags &= ~ATA_PFLAG_EH_PENDING; + ap->excl_link = NULL; /* don't maintain exclusion over EH */ spin_unlock_irqrestore(ap->lock, flags); /* invoke EH, skip if unloading or suspended */ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) ap->ops->error_handler(ap); - else + else { + /* if unloading, commence suicide */ + if ((ap->pflags & ATA_PFLAG_UNLOADING) && + !(ap->pflags & ATA_PFLAG_UNLOADED)) + ata_eh_unload(ap); ata_eh_finish(ap); + } /* process port suspend request */ ata_eh_handle_port_suspend(ap); - /* Exception might have happend after ->error_handler + /* Exception might have happened after ->error_handler * recovered the port but before this point. Repeat * EH in such case. */ spin_lock_irqsave(ap->lock, flags); if (ap->pflags & ATA_PFLAG_EH_PENDING) { - if (--repeat_cnt) { - ata_port_printk(ap, KERN_INFO, - "EH pending after completion, " - "repeating EH (cnt=%d)\n", repeat_cnt); + if (--ap->eh_tries) { spin_unlock_irqrestore(ap->lock, flags); goto repeat; } - ata_port_printk(ap, KERN_ERR, "EH pending after %d " - "tries, giving up\n", ATA_EH_MAX_REPEAT); + ata_port_err(ap, + "EH pending after %d tries, giving up\n", + ATA_EH_MAX_TRIES); + ap->pflags &= ~ATA_PFLAG_EH_PENDING; } /* this run is complete, make sure EH info is clear */ - memset(&ap->eh_info, 0, sizeof(ap->eh_info)); + ata_for_each_link(link, ap, HOST_FIRST) + memset(&link->eh_info, 0, sizeof(link->eh_info)); - /* Clear host_eh_scheduled while holding ap->lock such - * that if exception occurs after this point but - * before EH completion, SCSI midlayer will + /* end eh (clear host_eh_scheduled) while holding + * ap->lock such that if exception occurs after this + * point but before EH completion, SCSI midlayer will * re-initiate EH. 
*/ - host->host_eh_scheduled = 0; + ap->ops->end_eh(ap); spin_unlock_irqrestore(ap->lock, flags); + ata_eh_release(ap); } else { - WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); + WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); ap->ops->eng_timeout(ap); } - /* finish or retry handled scmd's and clean up */ - WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); - scsi_eh_flush_done_q(&ap->eh_done_q); /* clean up */ @@ -332,10 +816,10 @@ void ata_scsi_error(struct Scsi_Host *host) if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) - queue_work(ata_aux_wq, &ap->hotplug_task); + schedule_delayed_work(&ap->hotplug_task, 0); if (ap->pflags & ATA_PFLAG_RECOVERED) - ata_port_printk(ap, KERN_INFO, "EH complete\n"); + ata_port_info(ap, "EH complete\n"); ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); @@ -344,9 +828,8 @@ void ata_scsi_error(struct Scsi_Host *host) wake_up_all(&ap->eh_wait_q); spin_unlock_irqrestore(ap->lock, flags); - - DPRINTK("EXIT\n"); } +EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler); /** * ata_port_wait_eh - Wait for the currently pending EH to complete @@ -377,104 +860,99 @@ void ata_port_wait_eh(struct ata_port *ap) /* make sure SCSI EH is complete */ if (scsi_host_in_recovery(ap->scsi_host)) { - msleep(10); + ata_msleep(ap, 10); goto retry; } } +EXPORT_SYMBOL_GPL(ata_port_wait_eh); -/** - * ata_qc_timeout - Handle timeout of queued command - * @qc: Command that timed out - * - * Some part of the kernel (currently, only the SCSI layer) - * has noticed that the active command on port @ap has not - * completed after a specified length of time. Handle this - * condition by disabling DMA (if necessary) and completing - * transactions, with error if necessary. - * - * This also handles the case of the "lost interrupt", where - * for some reason (possibly hardware bug, possibly driver bug) - * an interrupt was not delivered to the driver, even though the - * transaction completed successfully. - * - * TODO: kill this function once old EH is gone. - * - * LOCKING: - * Inherited from SCSI layer (none, can sleep) - */ -static void ata_qc_timeout(struct ata_queued_cmd *qc) +static int ata_eh_nr_in_flight(struct ata_port *ap) { - struct ata_port *ap = qc->ap; - u8 host_stat = 0, drv_stat; - unsigned long flags; + unsigned int tag; + int nr = 0; - DPRINTK("ENTER\n"); - - ap->hsm_task_state = HSM_ST_IDLE; - - spin_lock_irqsave(ap->lock, flags); + /* count only non-internal commands */ + for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) + if (ata_qc_from_tag(ap, tag)) + nr++; - switch (qc->tf.protocol) { + return nr; +} - case ATA_PROT_DMA: - case ATA_PROT_ATAPI_DMA: - host_stat = ap->ops->bmdma_status(ap); +void ata_eh_fastdrain_timerfn(unsigned long arg) +{ + struct ata_port *ap = (void *)arg; + unsigned long flags; + int cnt; - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); + spin_lock_irqsave(ap->lock, flags); - /* fall through */ + cnt = ata_eh_nr_in_flight(ap); - default: - ata_altstatus(ap); - drv_stat = ata_chk_status(ap); + /* are we done? */ + if (!cnt) + goto out_unlock; - /* ack bmdma irq events */ - ap->ops->irq_clear(ap); + if (cnt == ap->fastdrain_cnt) { + unsigned int tag; - ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, " - "stat 0x%x host_stat 0x%x\n", - qc->tf.command, drv_stat, host_stat); + /* No progress during the last interval, tag all + * in-flight qcs as timed out and freeze the port. 
+ */
+		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+			if (qc)
+				qc->err_mask |= AC_ERR_TIMEOUT;
+		}
 
-		/* complete taskfile transaction */
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		break;
+		ata_port_freeze(ap);
+	} else {
+		/* some qcs have finished, give it another chance */
+		ap->fastdrain_cnt = cnt;
+		ap->fastdrain_timer.expires =
+			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+		add_timer(&ap->fastdrain_timer);
 	}
 
+ out_unlock:
 	spin_unlock_irqrestore(ap->lock, flags);
-
-	ata_eh_qc_complete(qc);
-
-	DPRINTK("EXIT\n");
 }
 
 /**
- * ata_eng_timeout - Handle timeout of queued command
- * @ap: Port on which timed-out command is active
- *
- * Some part of the kernel (currently, only the SCSI layer)
- * has noticed that the active command on port @ap has not
- * completed after a specified length of time.  Handle this
- * condition by disabling DMA (if necessary) and completing
- * transactions, with error if necessary.
- *
- * This also handles the case of the "lost interrupt", where
- * for some reason (possibly hardware bug, possibly driver bug)
- * an interrupt was not delivered to the driver, even though the
- * transaction completed successfully.
+ * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ * @ap: target ATA port
+ * @fastdrain: activate fast drain
 *
- * TODO: kill this function once old EH is gone.
+ * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ * is non-zero and EH wasn't pending before.  Fast drain ensures
+ * that EH kicks in in a timely manner.
 *
 * LOCKING:
- * Inherited from SCSI layer (none, can sleep)
+ * spin_lock_irqsave(host lock)
 */
-void ata_eng_timeout(struct ata_port *ap)
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 {
-	DPRINTK("ENTER\n");
+	int cnt;
 
-	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+	/* already scheduled? */
+	if (ap->pflags & ATA_PFLAG_EH_PENDING)
+		return;
 
-	DPRINTK("EXIT\n");
+	ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+	if (!fastdrain)
+		return;
+
+	/* do we have in-flight qcs? */
+	cnt = ata_eh_nr_in_flight(ap);
+	if (!cnt)
+		return;
+
+	/* activate fast drain */
+	ap->fastdrain_cnt = cnt;
+	ap->fastdrain_timer.expires =
+		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+	add_timer(&ap->fastdrain_timer);
 }
 
 /**
@@ -490,62 +968,95 @@ void ata_eng_timeout(struct ata_port *ap)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct request_queue *q = qc->scsicmd->device->request_queue;
+	unsigned long flags;
 
 	WARN_ON(!ap->ops->error_handler);
 
 	qc->flags |= ATA_QCFLAG_FAILED;
-	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
+	ata_eh_set_pending(ap, 1);
 
 	/* The following will fail if timeout has already expired.
 	 * ata_scsi_error() takes care of such scmds on EH entry.
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
-	scsi_req_abort_cmd(qc->scsicmd);
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_abort_request(qc->scsicmd->request);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
- * ata_port_schedule_eh - schedule error handling without a qc
- * @ap: ATA port to schedule EH for
+ * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
+ * @ap: ATA port to schedule EH for
 *
- * Schedule error handling for @ap.  EH will kick in as soon as
- * all commands are drained.
- * - * LOCKING: + * LOCKING: inherited from ata_port_schedule_eh * spin_lock_irqsave(host lock) */ -void ata_port_schedule_eh(struct ata_port *ap) +void ata_std_sched_eh(struct ata_port *ap) { WARN_ON(!ap->ops->error_handler); - ap->pflags |= ATA_PFLAG_EH_PENDING; + if (ap->pflags & ATA_PFLAG_INITIALIZING) + return; + + ata_eh_set_pending(ap, 1); scsi_schedule_eh(ap->scsi_host); DPRINTK("port EH scheduled\n"); } +EXPORT_SYMBOL_GPL(ata_std_sched_eh); /** - * ata_port_abort - abort all qc's on the port - * @ap: ATA port to abort qc's for + * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine + * @ap: ATA port to end EH for * - * Abort all active qc's of @ap and schedule EH. + * In the libata object model there is a 1:1 mapping of ata_port to + * shost, so host fields can be directly manipulated under ap->lock, in + * the libsas case we need to hold a lock at the ha->level to coordinate + * these events. * * LOCKING: * spin_lock_irqsave(host lock) + */ +void ata_std_end_eh(struct ata_port *ap) +{ + struct Scsi_Host *host = ap->scsi_host; + + host->host_eh_scheduled = 0; +} +EXPORT_SYMBOL(ata_std_end_eh); + + +/** + * ata_port_schedule_eh - schedule error handling without a qc + * @ap: ATA port to schedule EH for * - * RETURNS: - * Number of aborted qc's. + * Schedule error handling for @ap. EH will kick in as soon as + * all commands are drained. + * + * LOCKING: + * spin_lock_irqsave(host lock) */ -int ata_port_abort(struct ata_port *ap) +void ata_port_schedule_eh(struct ata_port *ap) +{ + /* see: ata_std_sched_eh, unless you know better */ + ap->ops->sched_eh(ap); +} + +static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { int tag, nr_aborted = 0; WARN_ON(!ap->ops->error_handler); + /* we're gonna abort all commands, no need for fast drain */ + ata_eh_set_pending(ap, 0); + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); - if (qc) { + if (qc && (!link || qc->dev->link == link)) { qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete(qc); nr_aborted++; @@ -559,6 +1070,40 @@ int ata_port_abort(struct ata_port *ap) } /** + * ata_link_abort - abort all qc's on the link + * @link: ATA link to abort qc's for + * + * Abort all active qc's active on @link and schedule EH. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Number of aborted qc's. + */ +int ata_link_abort(struct ata_link *link) +{ + return ata_do_link_abort(link->ap, link); +} + +/** + * ata_port_abort - abort all qc's on the port + * @ap: ATA port to abort qc's for + * + * Abort all active qc's of @ap and schedule EH. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * Number of aborted qc's. + */ +int ata_port_abort(struct ata_port *ap) +{ + return ata_do_link_abort(ap, NULL); +} + +/** * __ata_port_freeze - freeze port * @ap: ATA port to freeze * @@ -585,14 +1130,16 @@ static void __ata_port_freeze(struct ata_port *ap) ap->pflags |= ATA_PFLAG_FROZEN; - DPRINTK("ata%u port frozen\n", ap->id); + DPRINTK("ata%u port frozen\n", ap->print_id); } /** * ata_port_freeze - abort & freeze port * @ap: ATA port to freeze * - * Abort and freeze @ap. + * Abort and freeze @ap. The freeze operation must be called + * first, because some hardware requires special operations + * before the taskfile registers are accessible. 
* * LOCKING: * spin_lock_irqsave(host lock) @@ -606,13 +1153,86 @@ int ata_port_freeze(struct ata_port *ap) WARN_ON(!ap->ops->error_handler); - nr_aborted = ata_port_abort(ap); __ata_port_freeze(ap); + nr_aborted = ata_port_abort(ap); return nr_aborted; } /** + * sata_async_notification - SATA async notification handler + * @ap: ATA port where async notification is received + * + * Handler to be called when async notification via SDB FIS is + * received. This function schedules EH if necessary. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * 1 if EH is scheduled, 0 otherwise. + */ +int sata_async_notification(struct ata_port *ap) +{ + u32 sntf; + int rc; + + if (!(ap->flags & ATA_FLAG_AN)) + return 0; + + rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); + if (rc == 0) + sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); + + if (!sata_pmp_attached(ap) || rc) { + /* PMP is not attached or SNTF is not available */ + if (!sata_pmp_attached(ap)) { + /* PMP is not attached. Check whether ATAPI + * AN is configured. If so, notify media + * change. + */ + struct ata_device *dev = ap->link.device; + + if ((dev->class == ATA_DEV_ATAPI) && + (dev->flags & ATA_DFLAG_AN)) + ata_scsi_media_change_notify(dev); + return 0; + } else { + /* PMP is attached but SNTF is not available. + * ATAPI async media change notification is + * not used. The PMP must be reporting PHY + * status change, schedule EH. + */ + ata_port_schedule_eh(ap); + return 1; + } + } else { + /* PMP is attached and SNTF is available */ + struct ata_link *link; + + /* check and notify ATAPI AN */ + ata_for_each_link(link, ap, EDGE) { + if (!(sntf & (1 << link->pmp))) + continue; + + if ((link->device->class == ATA_DEV_ATAPI) && + (link->device->flags & ATA_DFLAG_AN)) + ata_scsi_media_change_notify(link->device); + } + + /* If PMP is reporting that PHY status of some + * downstream ports has changed, schedule EH. + */ + if (sntf & (1 << SATA_PMP_CTRL_PORT)) { + ata_port_schedule_eh(ap); + return 1; + } + + return 0; + } +} + +/** * ata_eh_freeze_port - EH helper to freeze port * @ap: ATA port to freeze * @@ -658,7 +1278,7 @@ void ata_eh_thaw_port(struct ata_port *ap) spin_unlock_irqrestore(ap->lock, flags); - DPRINTK("ata%u port thawed\n", ap->id); + DPRINTK("ata%u port thawed\n", ap->print_id); } static void ata_eh_scsidone(struct scsi_cmnd *scmd) @@ -703,18 +1323,44 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc) * should be retried. To be used from EH. * * SCSI midlayer limits the number of retries to scmd->allowed. - * scmd->retries is decremented for commands which get retried + * scmd->allowed is incremented for commands which get retried * due to unrelated failures (qc->err_mask is zero). */ void ata_eh_qc_retry(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; - if (!qc->err_mask && scmd->retries) - scmd->retries--; + if (!qc->err_mask) + scmd->allowed++; __ata_eh_qc_complete(qc); } /** + * ata_dev_disable - disable ATA device + * @dev: ATA device to disable + * + * Disable @dev. + * + * Locking: + * EH context. + */ +void ata_dev_disable(struct ata_device *dev) +{ + if (!ata_dev_enabled(dev)) + return; + + if (ata_msg_drv(dev->link->ap)) + ata_dev_warn(dev, "disabled\n"); + ata_acpi_on_disable(dev); + ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); + dev->class++; + + /* From now till the next successful probe, ering is used to + * track probe failures. Clear accumulated device error info. 
+ */ + ata_ering_clear(&dev->ering); +} + +/** * ata_eh_detach_dev - detach ATA device * @dev: ATA device to detach * @@ -723,9 +1369,11 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc) * LOCKING: * None. */ -static void ata_eh_detach_dev(struct ata_device *dev) +void ata_eh_detach_dev(struct ata_device *dev) { - struct ata_port *ap = dev->ap; + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; ata_dev_disable(dev); @@ -739,51 +1387,44 @@ static void ata_eh_detach_dev(struct ata_device *dev) ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; } - /* clear per-dev EH actions */ - ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK); - ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK); + /* clear per-dev EH info */ + ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); + ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); + ehc->saved_xfer_mode[dev->devno] = 0; + ehc->saved_ncq_enabled &= ~(1 << dev->devno); spin_unlock_irqrestore(ap->lock, flags); } /** * ata_eh_about_to_do - about to perform eh_action - * @ap: target ATA port + * @link: target ATA link * @dev: target ATA dev for per-dev action (can be NULL) * @action: action about to be performed * * Called just before performing EH actions to clear related bits - * in @ap->eh_info such that eh actions are not unnecessarily + * in @link->eh_info such that eh actions are not unnecessarily * repeated. * * LOCKING: * None. */ -static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, - unsigned int action) +void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, + unsigned int action) { + struct ata_port *ap = link->ap; + struct ata_eh_info *ehi = &link->eh_info; + struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; - struct ata_eh_info *ehi = &ap->eh_info; - struct ata_eh_context *ehc = &ap->eh_context; spin_lock_irqsave(ap->lock, flags); - /* Reset is represented by combination of actions and EHI - * flags. Suck in all related bits before clearing eh_info to - * avoid losing requested action. - */ - if (action & ATA_EH_RESET_MASK) { - ehc->i.action |= ehi->action & ATA_EH_RESET_MASK; - ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK; - - /* make sure all reset actions are cleared & clear EHI flags */ - action |= ATA_EH_RESET_MASK; - ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; - } - - ata_eh_clear_action(dev, ehi, action); + ata_eh_clear_action(link, dev, ehi, action); - if (!(ehc->i.flags & ATA_EHI_QUIET)) + /* About to take EH action, set RECOVERED. Ignore actions on + * slave links as master will do them again. + */ + if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) ap->pflags |= ATA_PFLAG_RECOVERED; spin_unlock_irqrestore(ap->lock, flags); @@ -791,26 +1432,22 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, /** * ata_eh_done - EH action complete - * @ap: target ATA port + * @link: target ATA link * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * * Called right after performing EH actions to clear related bits - * in @ap->eh_context. + * in @link->eh_context. * * LOCKING: * None.
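ata_eh_about_to_do and ata_eh_done bracket each recovery step: the first consumes the request bits from eh_info so a step is not serviced twice, the second retires the same bits from the active eh_context once the step completes. A toy model of that two-mask bookkeeping; the field and flag names here are made up for illustration, not libata's.

    #include <stdio.h>

    #define EH_REVALIDATE 1u
    #define EH_RESET      2u

    struct eh_state {
        unsigned int requested; /* like eh_info: what interrupts asked for */
        unsigned int active;    /* like eh_context: what this pass works on */
    };

    /* about_to_do: consume the request so it is not serviced twice */
    static void about_to_do(struct eh_state *s, unsigned int action)
    {
        s->requested &= ~action;
    }

    /* done: the action no longer needs to run in this pass either */
    static void eh_done(struct eh_state *s, unsigned int action)
    {
        s->active &= ~action;
    }

    int main(void)
    {
        struct eh_state s = { .requested = EH_RESET, .active = EH_RESET };

        about_to_do(&s, EH_RESET);  /* new RESET requests now start clean */
        eh_done(&s, EH_RESET);
        printf("requested=%#x active=%#x\n", s.requested, s.active);
        return 0;
    }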
*/ -static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, - unsigned int action) +void ata_eh_done(struct ata_link *link, struct ata_device *dev, + unsigned int action) { - /* if reset is complete, clear all reset actions & reset modifier */ - if (action & ATA_EH_RESET_MASK) { - action |= ATA_EH_RESET_MASK; - ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; - } + struct ata_eh_context *ehc = &link->eh_context; - ata_eh_clear_action(dev, &ap->eh_context.i, action); + ata_eh_clear_action(link, dev, &ehc->i, action); } /** @@ -827,7 +1464,7 @@ static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, * RETURNS: * Descriptive string for @err_mask */ -static const char * ata_err_string(unsigned int err_mask) +static const char *ata_err_string(unsigned int err_mask) { if (err_mask & AC_ERR_HOST_BUS) return "host bus error"; @@ -851,6 +1488,7 @@ static const char * ata_err_string(unsigned int err_mask) /** * ata_read_log_page - read a specific log page * @dev: target device + * @log: log to read * @page: page to read * @buf: buffer to store read page * @sectors: number of sectors to read @@ -863,24 +1501,25 @@ static const char * ata_err_string(unsigned int err_mask) * RETURNS: * 0 on success, AC_ERR_* mask otherwise. */ -static unsigned int ata_read_log_page(struct ata_device *dev, - u8 page, void *buf, unsigned int sectors) +unsigned int ata_read_log_page(struct ata_device *dev, u8 log, + u8 page, void *buf, unsigned int sectors) { struct ata_taskfile tf; unsigned int err_mask; - DPRINTK("read log page - page %d\n", page); + DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); ata_tf_init(dev, &tf); tf.command = ATA_CMD_READ_LOG_EXT; - tf.lbal = page; + tf.lbal = log; + tf.lbam = page; tf.nsect = sectors; tf.hob_nsect = sectors >> 8; tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; tf.protocol = ATA_PROT_PIO; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, - buf, sectors * ATA_SECT_SIZE); + buf, sectors * ATA_SECT_SIZE, 0); DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; @@ -904,12 +1543,12 @@ static unsigned int ata_read_log_page(struct ata_device *dev, static int ata_eh_read_log_10h(struct ata_device *dev, int *tag, struct ata_taskfile *tf) { - u8 *buf = dev->ap->sector_buf; + u8 *buf = dev->link->ap->sector_buf; unsigned int err_mask; u8 csum; int i; - err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); + err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); if (err_mask) return -EIO; @@ -917,8 +1556,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, for (i = 0; i < ATA_SECT_SIZE; i++) csum += buf[i]; if (csum) - ata_dev_printk(dev, KERN_WARNING, - "invalid checksum 0x%x on log page 10h\n", csum); + ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", + csum); if (buf[0] & 0x80) return -ENOENT; @@ -941,9 +1580,41 @@ static int ata_eh_read_log_10h(struct ata_device *dev, } /** + * atapi_eh_tur - perform ATAPI TEST_UNIT_READY + * @dev: target ATAPI device + * @r_sense_key: out parameter for sense_key + * + * Perform ATAPI TEST_UNIT_READY. + * + * LOCKING: + * EH context (may sleep). + * + * RETURNS: + * 0 on success, AC_ERR_* mask on failure. 
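With the new @log parameter, ata_read_log_page encodes the request as READ LOG EXT with the log number in LBA-low, the page number in LBA-mid, and a 16-bit sector count split across nsect/hob_nsect. A standalone sketch of that packing follows; the 0x2f opcode and the 0x10 NCQ error log number follow the ATA command set, while the reduced taskfile struct is a simplified stand-in for struct ata_taskfile.

    #include <stdint.h>
    #include <stdio.h>

    struct taskfile {   /* reduced model of struct ata_taskfile */
        uint8_t command, lbal, lbam, nsect, hob_nsect;
    };

    #define CMD_READ_LOG_EXT 0x2f

    static void fill_read_log(struct taskfile *tf, uint8_t log, uint8_t page,
                              uint16_t sectors)
    {
        tf->command = CMD_READ_LOG_EXT;
        tf->lbal = log;                 /* which log */
        tf->lbam = page;                /* which page within the log */
        tf->nsect = sectors & 0xff;     /* 16-bit count, split low/high */
        tf->hob_nsect = sectors >> 8;
    }

    int main(void)
    {
        struct taskfile tf;

        fill_read_log(&tf, 0x10, 0, 1); /* NCQ error log, page 0, 1 sector */
        printf("cmd=%02x lbal=%02x lbam=%02x nsect=%02x/%02x\n",
               tf.command, tf.lbal, tf.lbam, tf.hob_nsect, tf.nsect);
        return 0;
    }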
+ */ +unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) +{ + u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; + struct ata_taskfile tf; + unsigned int err_mask; + + ata_tf_init(dev, &tf); + + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.command = ATA_CMD_PACKET; + tf.protocol = ATAPI_PROT_NODATA; + + err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); + if (err_mask == AC_ERR_DEV) + *r_sense_key = tf.feature >> 4; + return err_mask; +} + +/** * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE * @dev: device to perform REQUEST_SENSE to * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) + * @dfl_sense_key: default sense key to use * * Perform ATAPI REQUEST_SENSE after the device reported CHECK * SENSE. This function is EH helper. @@ -954,51 +1625,48 @@ static int ata_eh_read_log_10h(struct ata_device *dev, * RETURNS: * 0 on success, AC_ERR_* mask on failure */ -static unsigned int atapi_eh_request_sense(struct ata_device *dev, - unsigned char *sense_buf) +unsigned int atapi_eh_request_sense(struct ata_device *dev, + u8 *sense_buf, u8 dfl_sense_key) { - struct ata_port *ap = dev->ap; + u8 cdb[ATAPI_CDB_LEN] = + { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; + struct ata_port *ap = dev->link->ap; struct ata_taskfile tf; - u8 cdb[ATAPI_CDB_LEN]; DPRINTK("ATAPI request sense\n"); - ata_tf_init(dev, &tf); - /* FIXME: is this needed? */ memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); - /* XXX: why tf_read here? */ - ap->ops->tf_read(ap, &tf); - - /* fill these in, for the case where they are -not- overwritten */ + /* initialize sense_buf with the error register, + * for the case where they are -not- overwritten + */ sense_buf[0] = 0x70; - sense_buf[2] = tf.feature >> 4; + sense_buf[2] = dfl_sense_key; - memset(cdb, 0, ATAPI_CDB_LEN); - cdb[0] = REQUEST_SENSE; - cdb[4] = SCSI_SENSE_BUFFERSIZE; + /* some devices time out if garbage left in tf */ + ata_tf_init(dev, &tf); tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; /* is it pointless to prefer PIO for "safety reasons"? */ if (ap->flags & ATA_FLAG_PIO_DMA) { - tf.protocol = ATA_PROT_ATAPI_DMA; + tf.protocol = ATAPI_PROT_DMA; tf.feature |= ATAPI_PKT_DMA; } else { - tf.protocol = ATA_PROT_ATAPI; - tf.lbam = (8 * 1024) & 0xff; - tf.lbah = (8 * 1024) >> 8; + tf.protocol = ATAPI_PROT_PIO; + tf.lbam = SCSI_SENSE_BUFFERSIZE; + tf.lbah = 0; } return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, - sense_buf, SCSI_SENSE_BUFFERSIZE); + sense_buf, SCSI_SENSE_BUFFERSIZE, 0); } /** * ata_eh_analyze_serror - analyze SError for a failed port - * @ap: ATA port to analyze SError for + * @link: ATA link to analyze SError for * * Analyze SError if available and further determine cause of * failure. @@ -1006,30 +1674,39 @@ static unsigned int atapi_eh_request_sense(struct ata_device *dev, * LOCKING: * None. 
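Both atapi_eh_tur and atapi_eh_request_sense lean on the ATAPI convention that, after a failed PACKET command, the sense key is reported in the upper nibble of the error (feature) register, which is what tf.feature >> 4 extracts above. A tiny standalone decoder as an example; the abbreviated name table is illustrative, keyed by the standard SCSI sense key values.

    #include <stdint.h>
    #include <stdio.h>

    /* sense key lives in bits 7:4 of the error register on CHECK CONDITION */
    static uint8_t sense_key_from_error_reg(uint8_t feature)
    {
        return feature >> 4;
    }

    int main(void)
    {
        static const char *keys[16] = {
            [0x0] = "NO SENSE",       [0x2] = "NOT READY",
            [0x3] = "MEDIUM ERROR",   [0x5] = "ILLEGAL REQUEST",
            [0x6] = "UNIT ATTENTION",
        };
        uint8_t err = 0x62;     /* example error register value */
        uint8_t key = sense_key_from_error_reg(err);

        printf("sense key %x (%s)\n", key, keys[key] ? keys[key] : "?");
        return 0;
    }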
*/ -static void ata_eh_analyze_serror(struct ata_port *ap) +static void ata_eh_analyze_serror(struct ata_link *link) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_eh_context *ehc = &link->eh_context; u32 serror = ehc->i.serror; unsigned int err_mask = 0, action = 0; + u32 hotplug_mask; - if (serror & SERR_PERSISTENT) { - err_mask |= AC_ERR_ATA_BUS; - action |= ATA_EH_HARDRESET; - } - if (serror & - (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) { + if (serror & (SERR_PERSISTENT | SERR_DATA)) { err_mask |= AC_ERR_ATA_BUS; - action |= ATA_EH_SOFTRESET; + action |= ATA_EH_RESET; } if (serror & SERR_PROTOCOL) { err_mask |= AC_ERR_HSM; - action |= ATA_EH_SOFTRESET; + action |= ATA_EH_RESET; } if (serror & SERR_INTERNAL) { err_mask |= AC_ERR_SYSTEM; - action |= ATA_EH_SOFTRESET; + action |= ATA_EH_RESET; } - if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) + + /* Determine whether a hotplug event has occurred. Both + * SError.N/X are considered hotplug events for enabled or + * host links. For disabled PMP links, only N bit is + * considered as X bit is left at 1 for link plugging. + */ + if (link->lpm_policy > ATA_LPM_MAX_POWER) + hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ + else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) + hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; + else + hotplug_mask = SERR_PHYRDY_CHG; + + if (serror & hotplug_mask) ata_ehi_hotplugged(&ehc->i); ehc->i.err_mask |= err_mask; @@ -1038,7 +1715,7 @@ static void ata_eh_analyze_serror(struct ata_port *ap) /** * ata_eh_analyze_ncq_error - analyze NCQ error - * @ap: ATA port to analyze NCQ error for + * @link: ATA link to analyze NCQ error for * * Read log page 10h, determine the offending qc and acquire * error status TF. For NCQ device errors, all LLDDs have to do @@ -1048,10 +1725,11 @@ static void ata_eh_analyze_serror(struct ata_port *ap) * LOCKING: * Kernel thread context (may sleep). */ -static void ata_eh_analyze_ncq_error(struct ata_port *ap) +void ata_eh_analyze_ncq_error(struct ata_link *link) { - struct ata_eh_context *ehc = &ap->eh_context; - struct ata_device *dev = ap->device; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev = link->device; struct ata_queued_cmd *qc; struct ata_taskfile tf; int tag, rc; @@ -1061,7 +1739,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) return; /* is it NCQ device error? */ - if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) + if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) return; /* has LLDD analyzed already? 
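ata_eh_analyze_serror boils SError down to an err_mask/action pair and only treats the N/X bits as hotplug when the LPM policy and link state allow it. The sketch below replays that mapping for one concrete value; the SERR_* bit positions mirror the SATA SError register layout used by the code above, while the ERR_* flags are placeholders, not libata's AC_ERR_* values.

    #include <stdint.h>
    #include <stdio.h>

    #define SERR_DATA        (1u << 8)
    #define SERR_PERSISTENT  (1u << 9)
    #define SERR_PROTOCOL    (1u << 10)
    #define SERR_INTERNAL    (1u << 11)
    #define SERR_PHYRDY_CHG  (1u << 16)  /* N bit */
    #define SERR_DEV_XCHG    (1u << 26)  /* X bit */

    #define ERR_ATA_BUS 1u
    #define ERR_HSM     2u
    #define ERR_SYSTEM  4u

    struct outcome { uint32_t err_mask; int reset; int hotplug; };

    static struct outcome analyze(uint32_t serror, uint32_t hotplug_mask)
    {
        struct outcome o = { 0, 0, 0 };

        if (serror & (SERR_PERSISTENT | SERR_DATA)) {
            o.err_mask |= ERR_ATA_BUS;
            o.reset = 1;
        }
        if (serror & SERR_PROTOCOL) {
            o.err_mask |= ERR_HSM;
            o.reset = 1;
        }
        if (serror & SERR_INTERNAL) {
            o.err_mask |= ERR_SYSTEM;
            o.reset = 1;
        }
        if (serror & hotplug_mask)
            o.hotplug = 1;
        return o;
    }

    int main(void)
    {
        /* enabled host link: both N and X bits count as hotplug */
        struct outcome o = analyze(SERR_PROTOCOL | SERR_DEV_XCHG,
                                   SERR_PHYRDY_CHG | SERR_DEV_XCHG);

        printf("err_mask=%#x reset=%d hotplug=%d\n",
               o.err_mask, o.reset, o.hotplug);
        return 0;
    }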
*/ @@ -1076,23 +1754,25 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) } /* okay, this error is ours */ + memset(&tf, 0, sizeof(tf)); rc = ata_eh_read_log_10h(dev, &tag, &tf); if (rc) { - ata_port_printk(ap, KERN_ERR, "failed to read log page 10h " - "(errno=%d)\n", rc); + ata_link_err(link, "failed to read log page 10h (errno=%d)\n", + rc); return; } - if (!(ap->sactive & (1 << tag))) { - ata_port_printk(ap, KERN_ERR, "log page 10h reported " - "inactive tag %d\n", tag); + if (!(link->sactive & (1 << tag))) { + ata_link_err(link, "log page 10h reported inactive tag %d\n", + tag); return; } /* we've got the perpetrator, condemn it */ qc = __ata_qc_from_tag(ap, tag); memcpy(&qc->result_tf, &tf, sizeof(tf)); - qc->err_mask |= AC_ERR_DEV; + qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; + qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; ehc->i.err_mask &= ~AC_ERR_DEV; } @@ -1103,7 +1783,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) * * Analyze taskfile of @qc and further determine cause of * failure. This function also requests ATAPI sense data if - * avaliable. + * available. * * LOCKING: * Kernel thread context (may sleep). @@ -1119,131 +1799,202 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { qc->err_mask |= AC_ERR_HSM; - return ATA_EH_SOFTRESET; + return ATA_EH_RESET; } - if (!(qc->err_mask & AC_ERR_DEV)) + if (stat & (ATA_ERR | ATA_DF)) + qc->err_mask |= AC_ERR_DEV; + else return 0; switch (qc->dev->class) { case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; - if (err & ATA_UNC) + if (err & (ATA_UNC | ATA_AMNF)) qc->err_mask |= AC_ERR_MEDIA; if (err & ATA_IDNF) qc->err_mask |= AC_ERR_INVALID; break; case ATA_DEV_ATAPI: - tmp = atapi_eh_request_sense(qc->dev, - qc->scsicmd->sense_buffer); - if (!tmp) { - /* ATA_QCFLAG_SENSE_VALID is used to tell - * atapi_qc_complete() that sense data is - * already valid. - * - * TODO: interpret sense data and set - * appropriate err_mask. - */ - qc->flags |= ATA_QCFLAG_SENSE_VALID; - } else - qc->err_mask |= tmp; + if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { + tmp = atapi_eh_request_sense(qc->dev, + qc->scsicmd->sense_buffer, + qc->result_tf.feature >> 4); + if (!tmp) { + /* ATA_QCFLAG_SENSE_VALID is used to + * tell atapi_qc_complete() that sense + * data is already valid. + * + * TODO: interpret sense data and set + * appropriate err_mask. 
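Before condemning a qc, ata_eh_analyze_ncq_error cross-checks the tag named by log page 10h against link->sactive: a tag that is no longer outstanding means the log page is bogus and must be ignored. A minimal model of that validity test, with one bit per in-flight tag.

    #include <stdint.h>
    #include <stdio.h>

    /* A tag reported by the NCQ error log is only credible if that tag
     * is still outstanding on the link (one bit per tag in sactive). */
    static int tag_outstanding(uint32_t sactive, int tag)
    {
        return tag >= 0 && tag < 32 && (sactive & (1u << tag));
    }

    int main(void)
    {
        uint32_t sactive = (1u << 3) | (1u << 7);  /* tags 3, 7 in flight */

        printf("tag 7: %s\n", tag_outstanding(sactive, 7) ? "ours" : "bogus");
        printf("tag 9: %s\n", tag_outstanding(sactive, 9) ? "ours" : "bogus");
        return 0;
    }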
+ */ + qc->flags |= ATA_QCFLAG_SENSE_VALID; + } else + qc->err_mask |= tmp; + } } if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) - action |= ATA_EH_SOFTRESET; + action |= ATA_EH_RESET; return action; } -static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent) +static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, + int *xfer_ok) { - if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT)) - return 1; + int base = 0; - if (ent->is_io) { - if (ent->err_mask & AC_ERR_HSM) - return 1; - if ((ent->err_mask & + if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) + *xfer_ok = 1; + + if (!*xfer_ok) + base = ATA_ECAT_DUBIOUS_NONE; + + if (err_mask & AC_ERR_ATA_BUS) + return base + ATA_ECAT_ATA_BUS; + + if (err_mask & AC_ERR_TIMEOUT) + return base + ATA_ECAT_TOUT_HSM; + + if (eflags & ATA_EFLAG_IS_IO) { + if (err_mask & AC_ERR_HSM) + return base + ATA_ECAT_TOUT_HSM; + if ((err_mask & (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) - return 2; + return base + ATA_ECAT_UNK_DEV; } return 0; } -struct speed_down_needed_arg { +struct speed_down_verdict_arg { u64 since; - int nr_errors[3]; + int xfer_ok; + int nr_errors[ATA_ECAT_NR]; }; -static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg) +static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) { - struct speed_down_needed_arg *arg = void_arg; + struct speed_down_verdict_arg *arg = void_arg; + int cat; - if (ent->timestamp < arg->since) + if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) return -1; - arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++; + cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, + &arg->xfer_ok); + arg->nr_errors[cat]++; + return 0; } /** - * ata_eh_speed_down_needed - Determine wheter speed down is necessary + * ata_eh_speed_down_verdict - Determine speed down verdict * @dev: Device of interest * * This function examines error ring of @dev and determines - * whether speed down is necessary. Speed down is necessary if - * there have been more than 3 of Cat-1 errors or 10 of Cat-2 - * errors during last 15 minutes. + * whether NCQ needs to be turned off, transfer speed should be + * stepped down, or falling back to PIO is necessary. * - * Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM - * violation for known supported commands. + * ECAT_ATA_BUS : ATA_BUS error for any command * - * Cat-2 errors are unclassified DEV error for known supported - * command. + * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for + * IO commands + * + * ECAT_UNK_DEV : Unknown DEV error for IO commands + * + * ECAT_DUBIOUS_* : Identical to the above three but occurred while + * the data transfer hadn't yet been verified. + * + * Verdicts are + * + * NCQ_OFF : Turn off NCQ. + * + * SPEED_DOWN : Speed down transfer speed but don't fall back + * to PIO. + * + * FALLBACK_TO_PIO : Fall back to PIO. + * + * Even if multiple verdicts are returned, only one action is + * taken per error. An action triggered by non-DUBIOUS errors + * clears ering, while one triggered by DUBIOUS_* errors doesn't. + * This is to expedite speed down decisions right after a device is + * initially configured. + * + * The following are the speed down rules. #1 and #2 deal with + * DUBIOUS errors. + * + * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors + * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. + * + * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors + * occurred during last 5 mins, NCQ_OFF. + * + * 3. 
If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors + * occurred during last 5 mins, FALLBACK_TO_PIO + * + * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred + * during last 10 mins, NCQ_OFF. + * + * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 + * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. * * LOCKING: * Inherited from caller. * * RETURNS: - * 1 if speed down is necessary, 0 otherwise + * OR of ATA_EH_SPDN_* flags. */ -static int ata_eh_speed_down_needed(struct ata_device *dev) +static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) { - const u64 interval = 15LLU * 60 * HZ; - static const int err_limits[3] = { -1, 3, 10 }; - struct speed_down_needed_arg arg; - struct ata_ering_entry *ent; - int err_cat; - u64 j64; + const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; + u64 j64 = get_jiffies_64(); + struct speed_down_verdict_arg arg; + unsigned int verdict = 0; - ent = ata_ering_top(&dev->ering); - if (!ent) - return 0; + /* scan past 5 mins of error history */ + memset(&arg, 0, sizeof(arg)); + arg.since = j64 - min(j64, j5mins); + ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); - err_cat = ata_eh_categorize_ering_entry(ent); - if (err_cat == 0) - return 0; + if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + + arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) + verdict |= ATA_EH_SPDN_SPEED_DOWN | + ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; + + if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + + arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) + verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; + + if (arg.nr_errors[ATA_ECAT_ATA_BUS] + + arg.nr_errors[ATA_ECAT_TOUT_HSM] + + arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) + verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; + /* scan past 10 mins of error history */ memset(&arg, 0, sizeof(arg)); + arg.since = j64 - min(j64, j10mins); + ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); - j64 = get_jiffies_64(); - if (j64 >= interval) - arg.since = j64 - interval; - else - arg.since = 0; + if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + + arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) + verdict |= ATA_EH_SPDN_NCQ_OFF; - ata_ering_map(&dev->ering, speed_down_needed_cb, &arg); + if (arg.nr_errors[ATA_ECAT_ATA_BUS] + + arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || + arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) + verdict |= ATA_EH_SPDN_SPEED_DOWN; - return arg.nr_errors[err_cat] > err_limits[err_cat]; + return verdict; } /** * ata_eh_speed_down - record error and speed down if necessary * @dev: Failed device - * @is_io: Did the device fail during normal IO? + * @eflags: mask of ATA_EFLAG_* flags * @err_mask: err_mask of the error * * Record error and examine error history to determine whether @@ -1255,49 +2006,122 @@ static int ata_eh_speed_down_needed(struct ata_device *dev) * Kernel thread context (may sleep). * * RETURNS: - * 0 on success, -errno otherwise + * Determined recovery action. 
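The verdict logic above therefore reduces to two windowed scans of the error ring, one over 5 minutes and one over 10, with per-category sums compared against fixed thresholds. The sketch below replays the non-DUBIOUS rules with made-up counters, using the thresholds from the code above; the category and flag names are placeholders patterned on the ATA_ECAT_*/ATA_EH_SPDN_* constants.

    #include <stdio.h>

    enum { ECAT_ATA_BUS, ECAT_TOUT_HSM, ECAT_UNK_DEV, ECAT_NR };

    #define SPDN_NCQ_OFF    1u
    #define SPDN_SPEED_DOWN 2u
    #define SPDN_FALLBACK   4u

    /* nr5/nr10: per-category error counts in the last 5/10 minutes */
    static unsigned int verdict(const int nr5[ECAT_NR], const int nr10[ECAT_NR])
    {
        unsigned int v = 0;

        if (nr5[ECAT_ATA_BUS] + nr5[ECAT_TOUT_HSM] + nr5[ECAT_UNK_DEV] > 6)
            v |= SPDN_FALLBACK;                         /* rule 3 */
        if (nr10[ECAT_TOUT_HSM] + nr10[ECAT_UNK_DEV] > 3)
            v |= SPDN_NCQ_OFF;                          /* rule 4 */
        if (nr10[ECAT_ATA_BUS] + nr10[ECAT_TOUT_HSM] > 3 ||
            nr10[ECAT_UNK_DEV] > 6)
            v |= SPDN_SPEED_DOWN;                       /* rule 5 */
        return v;
    }

    int main(void)
    {
        int nr5[ECAT_NR] = { 2, 1, 0 };     /* 3 errors in the 5-min window */
        int nr10[ECAT_NR] = { 3, 1, 0 };    /* 4 in the wider window */

        printf("verdict mask = %#x\n", verdict(nr5, nr10));  /* 0x2 */
        return 0;
    }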
*/ -static int ata_eh_speed_down(struct ata_device *dev, int is_io, - unsigned int err_mask) +static unsigned int ata_eh_speed_down(struct ata_device *dev, + unsigned int eflags, unsigned int err_mask) { - if (!err_mask) + struct ata_link *link = ata_dev_phys_link(dev); + int xfer_ok = 0; + unsigned int verdict; + unsigned int action = 0; + + /* don't bother if Cat-0 error */ + if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) return 0; /* record error and determine whether speed down is necessary */ - ata_ering_record(&dev->ering, is_io, err_mask); + ata_ering_record(&dev->ering, eflags, err_mask); + verdict = ata_eh_speed_down_verdict(dev); + + /* turn off NCQ? */ + if ((verdict & ATA_EH_SPDN_NCQ_OFF) && + (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | + ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { + dev->flags |= ATA_DFLAG_NCQ_OFF; + ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); + goto done; + } - if (!ata_eh_speed_down_needed(dev)) - return 0; + /* speed down? */ + if (verdict & ATA_EH_SPDN_SPEED_DOWN) { + /* speed down SATA link speed if possible */ + if (sata_down_spd_limit(link, 0) == 0) { + action |= ATA_EH_RESET; + goto done; + } + + /* lower transfer mode */ + if (dev->spdn_cnt < 2) { + static const int dma_dnxfer_sel[] = + { ATA_DNXFER_DMA, ATA_DNXFER_40C }; + static const int pio_dnxfer_sel[] = + { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; + int sel; + + if (dev->xfer_shift != ATA_SHIFT_PIO) + sel = dma_dnxfer_sel[dev->spdn_cnt]; + else + sel = pio_dnxfer_sel[dev->spdn_cnt]; + + dev->spdn_cnt++; - /* speed down SATA link speed if possible */ - if (sata_down_spd_limit(dev->ap) == 0) - return ATA_EH_HARDRESET; + if (ata_down_xfermask_limit(dev, sel) == 0) { + action |= ATA_EH_RESET; + goto done; + } + } + } - /* lower transfer mode */ - if (ata_down_xfermask_limit(dev, 0) == 0) - return ATA_EH_SOFTRESET; + /* Fall back to PIO? Slowing down to PIO is meaningless for + * SATA ATA devices. Consider it only for PATA and SATAPI. + */ + if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && + (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && + (dev->xfer_shift != ATA_SHIFT_PIO)) { + if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { + dev->spdn_cnt = 0; + action |= ATA_EH_RESET; + goto done; + } + } - ata_dev_printk(dev, KERN_ERR, - "speed down requested but no transfer mode left\n"); return 0; + done: + /* device has been slowed down, blow error history */ + if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) + ata_ering_clear(&dev->ering); + return action; } /** - * ata_eh_autopsy - analyze error and determine recovery action - * @ap: ATA port to perform autopsy on + * ata_eh_worth_retry - analyze error and decide whether to retry + * @qc: qc to possibly retry + * + * Look at the cause of the error and decide if a retry + * might be useful or not. We don't want to retry media errors + * because the drive itself has probably already taken 10-30 seconds + * doing its own internal retries before reporting the failure. 
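ata_eh_speed_down escalates in a fixed order: drop the SATA link speed first, then step the transfer mode down twice via the spdn_cnt-indexed selector tables, and only then consider a PIO fallback for PATA and ATAPI devices. A compact model of the spdn_cnt ladder; the selector names stand in for the ATA_DNXFER_* constants.

    #include <stdio.h>

    enum dnxfer { DNXFER_DMA, DNXFER_40C, DNXFER_PIO, DNXFER_FORCE_PIO0 };

    /* First slowdown relaxes within the current class; the second is
     * harsher, matching the two-entry tables above. */
    static enum dnxfer pick_dnxfer(int using_dma, int spdn_cnt)
    {
        static const enum dnxfer dma_sel[] = { DNXFER_DMA, DNXFER_40C };
        static const enum dnxfer pio_sel[] = { DNXFER_PIO, DNXFER_FORCE_PIO0 };

        return using_dma ? dma_sel[spdn_cnt] : pio_sel[spdn_cnt];
    }

    int main(void)
    {
        for (int cnt = 0; cnt < 2; cnt++)
            printf("DMA device, slowdown %d -> selector %d\n",
                   cnt, pick_dnxfer(1, cnt));
        return 0;
    }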
+ */ +static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) +{ + if (qc->err_mask & AC_ERR_MEDIA) + return 0; /* don't retry media errors */ + if (qc->flags & ATA_QCFLAG_IO) + return 1; /* otherwise retry anything from fs stack */ + if (qc->err_mask & AC_ERR_INVALID) + return 0; /* don't retry these */ + return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ +} + +/** + * ata_eh_link_autopsy - analyze error and determine recovery action + * @link: host link to perform autopsy on * - * Analyze why @ap failed and determine which recovery action is - * needed. This function also sets more detailed AC_ERR_* values - * and fills sense data for ATAPI CHECK SENSE. + * Analyze why @link failed and determine which recovery actions + * are needed. This function also sets more detailed AC_ERR_* + * values and fills sense data for ATAPI CHECK SENSE. * * LOCKING: * Kernel thread context (may sleep). */ -static void ata_eh_autopsy(struct ata_port *ap) +static void ata_eh_link_autopsy(struct ata_link *link) { - struct ata_eh_context *ehc = &ap->eh_context; - unsigned int all_err_mask = 0; - int tag, is_io = 0; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev; + unsigned int all_err_mask = 0, eflags = 0; + int tag; u32 serror; int rc; @@ -1307,15 +2131,19 @@ static void ata_eh_autopsy(struct ata_port *ap) return; /* obtain and analyze SError */ - rc = sata_scr_read(ap, SCR_ERROR, &serror); + rc = sata_scr_read(link, SCR_ERROR, &serror); if (rc == 0) { ehc->i.serror |= serror; - ata_eh_analyze_serror(ap); - } else if (rc != -EOPNOTSUPP) - ehc->i.action |= ATA_EH_HARDRESET; + ata_eh_analyze_serror(link); + } else if (rc != -EOPNOTSUPP) { + /* SError read failed, force reset and probing */ + ehc->i.probe_mask |= ATA_ALL_DEVICES; + ehc->i.action |= ATA_EH_RESET; + ehc->i.err_mask |= AC_ERR_OTHER; + } /* analyze NCQ failure */ - ata_eh_analyze_ncq_error(ap); + ata_eh_analyze_ncq_error(link); /* any real error trumps AC_ERR_OTHER */ if (ehc->i.err_mask & ~AC_ERR_OTHER) @@ -1326,7 +2154,8 @@ static void ata_eh_autopsy(struct ata_port *ap) for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - if (!(qc->flags & ATA_QCFLAG_FAILED)) + if (!(qc->flags & ATA_QCFLAG_FAILED) || + ata_dev_phys_link(qc->dev) != link) continue; /* inherit upper level err_mask */ @@ -1345,55 +2174,241 @@ static void ata_eh_autopsy(struct ata_port *ap) qc->err_mask &= ~AC_ERR_OTHER; /* SENSE_VALID trumps dev/unknown error and revalidation */ - if (qc->flags & ATA_QCFLAG_SENSE_VALID) { + if (qc->flags & ATA_QCFLAG_SENSE_VALID) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - ehc->i.action &= ~ATA_EH_REVALIDATE; - } + + /* determine whether the command is worth retrying */ + if (ata_eh_worth_retry(qc)) + qc->flags |= ATA_QCFLAG_RETRY; /* accumulate error info */ ehc->i.dev = qc->dev; all_err_mask |= qc->err_mask; if (qc->flags & ATA_QCFLAG_IO) - is_io = 1; + eflags |= ATA_EFLAG_IS_IO; } /* enforce default EH actions */ if (ap->pflags & ATA_PFLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) - ehc->i.action |= ATA_EH_SOFTRESET; - else if (all_err_mask) + ehc->i.action |= ATA_EH_RESET; + else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || + (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) ehc->i.action |= ATA_EH_REVALIDATE; - /* if we have offending qcs and the associated failed device */ + /* If we have offending qcs and the associated failed device, + * perform per-dev EH action only on the 
offending device. + */ if (ehc->i.dev) { - /* speed down */ - ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, - all_err_mask); - - /* perform per-dev EH action only on the offending device */ ehc->i.dev_action[ehc->i.dev->devno] |= ehc->i.action & ATA_EH_PERDEV_MASK; ehc->i.action &= ~ATA_EH_PERDEV_MASK; } + /* propagate timeout to host link */ + if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) + ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; + + /* record error and consider speeding down */ + dev = ehc->i.dev; + if (!dev && ((ata_link_max_devices(link) == 1 && + ata_dev_enabled(link->device)))) + dev = link->device; + + if (dev) { + if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) + eflags |= ATA_EFLAG_DUBIOUS_XFER; + ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + } + DPRINTK("EXIT\n"); } /** - * ata_eh_report - report error handling to user - * @ap: ATA port EH is going on + * ata_eh_autopsy - analyze error and determine recovery action + * @ap: host port to perform autopsy on + * + * Analyze all links of @ap and determine why they failed and + * which recovery actions are needed. + * + * LOCKING: + * Kernel thread context (may sleep). + */ +void ata_eh_autopsy(struct ata_port *ap) +{ + struct ata_link *link; + + ata_for_each_link(link, ap, EDGE) + ata_eh_link_autopsy(link); + + /* Handle the frigging slave link. Autopsy is done similarly + * but actions and flags are transferred over to the master + * link and handled from there. + */ + if (ap->slave_link) { + struct ata_eh_context *mehc = &ap->link.eh_context; + struct ata_eh_context *sehc = &ap->slave_link->eh_context; + + /* transfer control flags from master to slave */ + sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; + + /* perform autopsy on the slave link */ + ata_eh_link_autopsy(ap->slave_link); + + /* transfer actions from slave to master and clear slave */ + ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); + mehc->i.action |= sehc->i.action; + mehc->i.dev_action[1] |= sehc->i.dev_action[1]; + mehc->i.flags |= sehc->i.flags; + ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); + } + + /* Autopsy of fanout ports can affect host link autopsy. + * Perform host link autopsy last. + */ + if (sata_pmp_attached(ap)) + ata_eh_link_autopsy(&ap->link); +} + +/** + * ata_get_cmd_descript - get description for ATA command + * @command: ATA command code to get description for + * + * Return a textual description of the given command, or NULL if the + * command is not known. 
+ * + * LOCKING: + * None + */ +const char *ata_get_cmd_descript(u8 command) +{ +#ifdef CONFIG_ATA_VERBOSE_ERROR + static const struct + { + u8 command; + const char *text; + } cmd_descr[] = { + { ATA_CMD_DEV_RESET, "DEVICE RESET" }, + { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, + { ATA_CMD_STANDBY, "STANDBY" }, + { ATA_CMD_IDLE, "IDLE" }, + { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, + { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, + { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, + { ATA_CMD_NOP, "NOP" }, + { ATA_CMD_FLUSH, "FLUSH CACHE" }, + { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, + { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, + { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, + { ATA_CMD_SERVICE, "SERVICE" }, + { ATA_CMD_READ, "READ DMA" }, + { ATA_CMD_READ_EXT, "READ DMA EXT" }, + { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, + { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, + { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, + { ATA_CMD_WRITE, "WRITE DMA" }, + { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, + { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, + { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, + { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, + { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, + { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, + { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, + { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, + { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, + { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, + { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, + { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, + { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, + { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, + { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, + { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, + { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, + { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, + { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, + { ATA_CMD_SET_FEATURES, "SET FEATURES" }, + { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, + { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, + { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, + { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, + { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, + { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, + { ATA_CMD_SLEEP, "SLEEP" }, + { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, + { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, + { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, + { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, + { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, + { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, + { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, + { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, + { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, + { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, + { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, + { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, + { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, + { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, + { ATA_CMD_PMP_READ, "READ BUFFER" }, + { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, + { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, + { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, + { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, + { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, + { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, + { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, + { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, + { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, + { 
ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, + { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, + { ATA_CMD_SMART, "SMART" }, + { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, + { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, + { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, + { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, + { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, + { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, + { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, + { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, + { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, + { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, + { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, + { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, + { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, + { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, + { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, + { ATA_CMD_RESTORE, "RECALIBRATE" }, + { 0, NULL } /* terminate list */ + }; + + unsigned int i; + for (i = 0; cmd_descr[i].text; i++) + if (cmd_descr[i].command == command) + return cmd_descr[i].text; +#endif + + return NULL; +} + +/** + * ata_eh_link_report - report error handling to user + * @link: ATA link EH is going on * * Report EH to user. * * LOCKING: * None. */ -static void ata_eh_report(struct ata_port *ap) +static void ata_eh_link_report(struct ata_link *link) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; const char *frozen, *desc; + char tries_buf[6] = ""; int tag, nr_failed = 0; + if (ehc->i.flags & ATA_EHI_QUIET) + return; + desc = NULL; if (ehc->i.desc[0] != '\0') desc = ehc->i.desc; @@ -1401,7 +2416,10 @@ static void ata_eh_report(struct ata_port *ap) for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - if (!(qc->flags & ATA_QCFLAG_FAILED)) + if (!(qc->flags & ATA_QCFLAG_FAILED) || + ata_dev_phys_link(qc->dev) != link || + ((qc->flags & ATA_QCFLAG_QUIET) && + qc->err_mask == AC_ERR_DEV)) continue; if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) continue; @@ -1416,475 +2434,1126 @@ static void ata_eh_report(struct ata_port *ap) if (ap->pflags & ATA_PFLAG_FROZEN) frozen = " frozen"; + if (ap->eh_tries < ATA_EH_MAX_TRIES) + snprintf(tries_buf, sizeof(tries_buf), " t%d", + ap->eh_tries); + if (ehc->i.dev) { - ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " - "SAct 0x%x SErr 0x%x action 0x%x%s\n", - ehc->i.err_mask, ap->sactive, ehc->i.serror, - ehc->i.action, frozen); + ata_dev_err(ehc->i.dev, "exception Emask 0x%x " + "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", + ehc->i.err_mask, link->sactive, ehc->i.serror, + ehc->i.action, frozen, tries_buf); if (desc) - ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc); + ata_dev_err(ehc->i.dev, "%s\n", desc); } else { - ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " - "SAct 0x%x SErr 0x%x action 0x%x%s\n", - ehc->i.err_mask, ap->sactive, ehc->i.serror, - ehc->i.action, frozen); + ata_link_err(link, "exception Emask 0x%x " + "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", + ehc->i.err_mask, link->sactive, ehc->i.serror, + ehc->i.action, frozen, tries_buf); if (desc) - ata_port_printk(ap, KERN_ERR, "(%s)\n", desc); + ata_link_err(link, "%s\n", desc); } +#ifdef CONFIG_ATA_VERBOSE_ERROR + if (ehc->i.serror) + ata_link_err(link, + "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", + ehc->i.serror & SERR_DATA_RECOVERED ? 
"RecovData " : "", + ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", + ehc->i.serror & SERR_DATA ? "UnrecovData " : "", + ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", + ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", + ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", + ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", + ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", + ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", + ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", + ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", + ehc->i.serror & SERR_CRC ? "BadCRC " : "", + ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", + ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", + ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", + ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", + ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); +#endif + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); + struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; + const u8 *cdb = qc->cdb; + char data_buf[20] = ""; + char cdb_buf[70] = ""; - if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) + if (!(qc->flags & ATA_QCFLAG_FAILED) || + ata_dev_phys_link(qc->dev) != link || !qc->err_mask) continue; - ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x " - "Emask 0x%x stat 0x%x err 0x%x (%s)\n", - qc->tag, qc->tf.command, qc->err_mask, - qc->result_tf.command, qc->result_tf.feature, - ata_err_string(qc->err_mask)); + if (qc->dma_dir != DMA_NONE) { + static const char *dma_str[] = { + [DMA_BIDIRECTIONAL] = "bidi", + [DMA_TO_DEVICE] = "out", + [DMA_FROM_DEVICE] = "in", + }; + static const char *prot_str[] = { + [ATA_PROT_PIO] = "pio", + [ATA_PROT_DMA] = "dma", + [ATA_PROT_NCQ] = "ncq", + [ATAPI_PROT_PIO] = "pio", + [ATAPI_PROT_DMA] = "dma", + }; + + snprintf(data_buf, sizeof(data_buf), " %s %u %s", + prot_str[qc->tf.protocol], qc->nbytes, + dma_str[qc->dma_dir]); + } + + if (ata_is_atapi(qc->tf.protocol)) { + if (qc->scsicmd) + scsi_print_command(qc->scsicmd); + else + snprintf(cdb_buf, sizeof(cdb_buf), + "cdb %02x %02x %02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x %02x %02x\n ", + cdb[0], cdb[1], cdb[2], cdb[3], + cdb[4], cdb[5], cdb[6], cdb[7], + cdb[8], cdb[9], cdb[10], cdb[11], + cdb[12], cdb[13], cdb[14], cdb[15]); + } else { + const char *descr = ata_get_cmd_descript(cmd->command); + if (descr) + ata_dev_err(qc->dev, "failed command: %s\n", + descr); + } + + ata_dev_err(qc->dev, + "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " + "tag %d%s\n %s" + "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " + "Emask 0x%x (%s)%s\n", + cmd->command, cmd->feature, cmd->nsect, + cmd->lbal, cmd->lbam, cmd->lbah, + cmd->hob_feature, cmd->hob_nsect, + cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, + cmd->device, qc->tag, data_buf, cdb_buf, + res->command, res->feature, res->nsect, + res->lbal, res->lbam, res->lbah, + res->hob_feature, res->hob_nsect, + res->hob_lbal, res->hob_lbam, res->hob_lbah, + res->device, qc->err_mask, ata_err_string(qc->err_mask), + qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); + +#ifdef CONFIG_ATA_VERBOSE_ERROR + if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | + ATA_ERR)) { + if (res->command & ATA_BUSY) + ata_dev_err(qc->dev, "status: { Busy }\n"); + else + ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", + res->command & ATA_DRDY ? "DRDY " : "", + res->command & ATA_DF ? "DF " : "", + res->command & ATA_DRQ ? "DRQ " : "", + res->command & ATA_ERR ? 
"ERR " : ""); + } + + if (cmd->command != ATA_CMD_PACKET && + (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | + ATA_IDNF | ATA_ABORTED))) + ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", + res->feature & ATA_ICRC ? "ICRC " : "", + res->feature & ATA_UNC ? "UNC " : "", + res->feature & ATA_AMNF ? "AMNF " : "", + res->feature & ATA_IDNF ? "IDNF " : "", + res->feature & ATA_ABORTED ? "ABRT " : ""); +#endif } } -static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, - unsigned int *classes) +/** + * ata_eh_report - report error handling to user + * @ap: ATA port to report EH about + * + * Report EH to user. + * + * LOCKING: + * None. + */ +void ata_eh_report(struct ata_port *ap) { - int i, rc; - - for (i = 0; i < ATA_MAX_DEVICES; i++) - classes[i] = ATA_DEV_UNKNOWN; + struct ata_link *link; - rc = reset(ap, classes); - if (rc) - return rc; + ata_for_each_link(link, ap, HOST_FIRST) + ata_eh_link_report(link); +} - /* If any class isn't ATA_DEV_UNKNOWN, consider classification - * is complete and convert all ATA_DEV_UNKNOWN to - * ATA_DEV_NONE. - */ - for (i = 0; i < ATA_MAX_DEVICES; i++) - if (classes[i] != ATA_DEV_UNKNOWN) - break; +static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, + unsigned int *classes, unsigned long deadline, + bool clear_classes) +{ + struct ata_device *dev; - if (i < ATA_MAX_DEVICES) - for (i = 0; i < ATA_MAX_DEVICES; i++) - if (classes[i] == ATA_DEV_UNKNOWN) - classes[i] = ATA_DEV_NONE; + if (clear_classes) + ata_for_each_dev(dev, link, ALL) + classes[dev->devno] = ATA_DEV_UNKNOWN; - return 0; + return reset(link, classes, deadline); } -static int ata_eh_followup_srst_needed(int rc, int classify, - const unsigned int *classes) +static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) { + if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) + return 0; if (rc == -EAGAIN) return 1; - if (rc != 0) - return 0; - if (classify && classes[0] == ATA_DEV_UNKNOWN) + if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) return 1; return 0; } -static int ata_eh_reset(struct ata_port *ap, int classify, - ata_prereset_fn_t prereset, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) +int ata_eh_reset(struct ata_link *link, int classify, + ata_prereset_fn_t prereset, ata_reset_fn_t softreset, + ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_port *ap = link->ap; + struct ata_link *slave = ap->slave_link; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; unsigned int *classes = ehc->classes; - int tries = ATA_EH_RESET_TRIES; + unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); - unsigned int action; + int max_tries = 0, try = 0; + struct ata_link *failed_link; + struct ata_device *dev; + unsigned long deadline, now; ata_reset_fn_t reset; - int i, did_followup_srst, rc; - - /* about to reset */ - ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); + unsigned long flags; + u32 sstatus; + int nr_unknown, rc; - /* Determine which reset to use and record in ehc->i.action. - * prereset() may examine and modify it. 
+ /* + * Prepare to reset */ - action = ehc->i.action; - ehc->i.action &= ~ATA_EH_RESET_MASK; - if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && - !(action & ATA_EH_HARDRESET)))) - ehc->i.action |= ATA_EH_SOFTRESET; - else - ehc->i.action |= ATA_EH_HARDRESET; + while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) + max_tries++; + if (link->flags & ATA_LFLAG_RST_ONCE) + max_tries = 1; + if (link->flags & ATA_LFLAG_NO_HRST) + hardreset = NULL; + if (link->flags & ATA_LFLAG_NO_SRST) + softreset = NULL; + + /* make sure each reset attempt is at least COOL_DOWN apart */ + if (ehc->i.flags & ATA_EHI_DID_RESET) { + now = jiffies; + WARN_ON(time_after(ehc->last_reset, now)); + deadline = ata_deadline(ehc->last_reset, + ATA_EH_RESET_COOL_DOWN); + if (time_before(now, deadline)) + schedule_timeout_uninterruptible(deadline - now); + } - if (prereset) { - rc = prereset(ap); - if (rc) { - if (rc == -ENOENT) { - ata_port_printk(ap, KERN_DEBUG, "port disabled. ignoring.\n"); - ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; - } else - ata_port_printk(ap, KERN_ERR, - "prereset failed (errno=%d)\n", rc); - return rc; - } + spin_lock_irqsave(ap->lock, flags); + ap->pflags |= ATA_PFLAG_RESETTING; + spin_unlock_irqrestore(ap->lock, flags); + + ata_eh_about_to_do(link, NULL, ATA_EH_RESET); + + ata_for_each_dev(dev, link, ALL) { + /* If we issue an SRST then an ATA drive (not ATAPI) + * may change configuration and be in PIO0 timing. If + * we do a hard reset (or are coming from power on) + * this is true for ATA or ATAPI. Until we've set a + * suitable controller mode we should not touch the + * bus as we may be talking too fast. + */ + dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; + + /* If the controller has a pio mode setup function + * then use it to set the chipset to rights. Don't + * touch the DMA setup as that will be dealt with when + * configuring devices. + */ + if (ap->ops->set_piomode) + ap->ops->set_piomode(ap, dev); } - /* prereset() might have modified ehc->i.action */ - if (ehc->i.action & ATA_EH_HARDRESET) + /* prefer hardreset */ + reset = NULL; + ehc->i.action &= ~ATA_EH_RESET; + if (hardreset) { reset = hardreset; - else if (ehc->i.action & ATA_EH_SOFTRESET) + ehc->i.action |= ATA_EH_HARDRESET; + } else if (softreset) { reset = softreset; - else { - /* prereset told us not to reset, bang classes and return */ - for (i = 0; i < ATA_MAX_DEVICES; i++) - classes[i] = ATA_DEV_NONE; - return 0; + ehc->i.action |= ATA_EH_SOFTRESET; } - /* did prereset() screw up? if so, fix up to avoid oopsing */ - if (!reset) { - ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested " - "invalid reset type\n"); - if (softreset) - reset = softreset; - else - reset = hardreset; - } + if (prereset) { + unsigned long deadline = ata_deadline(jiffies, + ATA_EH_PRERESET_TIMEOUT); - retry: - /* shut up during boot probing */ - if (verbose) - ata_port_printk(ap, KERN_INFO, "%s resetting port\n", - reset == softreset ? "soft" : "hard"); + if (slave) { + sehc->i.action &= ~ATA_EH_RESET; + sehc->i.action |= ehc->i.action; + } - /* mark that this EH session started with reset */ - ehc->i.flags |= ATA_EHI_DID_RESET; + rc = prereset(link, deadline); - rc = ata_do_reset(ap, reset, classes); + /* If present, do prereset on slave link too. Reset + * is skipped iff both master and slave links report + * -ENOENT or clear ATA_EH_RESET. 
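The retry machinery above sizes itself by walking a ULONG_MAX-terminated timeout table, so each attempt gets a progressively longer deadline and the table length needs no separate constant. A userspace model of that loop follows; the timeout values are made up, not the contents of the kernel's ata_eh_reset_timeouts[].

    #include <limits.h>
    #include <stdio.h>

    /* deadline per attempt, terminated by the ULONG_MAX sentinel */
    static const unsigned long reset_timeouts[] = { 10, 10, 35, ULONG_MAX };

    int main(void)
    {
        int max_tries = 0, try = 0;

        /* size the table by walking to the sentinel */
        while (reset_timeouts[max_tries] != ULONG_MAX)
            max_tries++;

        while (try < max_tries) {
            unsigned long deadline = reset_timeouts[try++];

            printf("attempt %d: deadline %lu s\n", try, deadline);
            /* a real loop issues the reset here and, as the table nears
             * exhaustion, also steps the link speed down */
        }
        return 0;
    }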
+ */ + if (slave && (rc == 0 || rc == -ENOENT)) { + int tmp; - did_followup_srst = 0; - if (reset == hardreset && - ata_eh_followup_srst_needed(rc, classify, classes)) { - /* okay, let's do follow-up softreset */ - did_followup_srst = 1; - reset = softreset; + tmp = prereset(slave, deadline); + if (tmp != -ENOENT) + rc = tmp; - if (!reset) { - ata_port_printk(ap, KERN_ERR, - "follow-up softreset required " - "but no softreset avaliable\n"); - return -EINVAL; + ehc->i.action |= sehc->i.action; } - ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); - rc = ata_do_reset(ap, reset, classes); + if (rc) { + if (rc == -ENOENT) { + ata_link_dbg(link, "port disabled--ignoring\n"); + ehc->i.action &= ~ATA_EH_RESET; + + ata_for_each_dev(dev, link, ALL) + classes[dev->devno] = ATA_DEV_NONE; - if (rc == 0 && classify && - classes[0] == ATA_DEV_UNKNOWN) { - ata_port_printk(ap, KERN_ERR, - "classification failed\n"); - return -EINVAL; + rc = 0; + } else + ata_link_err(link, + "prereset failed (errno=%d)\n", + rc); + goto out; + } + + /* prereset() might have cleared ATA_EH_RESET. If so, + * bang classes, thaw and return. + */ + if (reset && !(ehc->i.action & ATA_EH_RESET)) { + ata_for_each_dev(dev, link, ALL) + classes[dev->devno] = ATA_DEV_NONE; + if ((ap->pflags & ATA_PFLAG_FROZEN) && + ata_is_host_link(link)) + ata_eh_thaw_port(ap); + rc = 0; + goto out; } } - if (rc && --tries) { - const char *type; + retry: + /* + * Perform reset + */ + if (ata_is_host_link(link)) + ata_eh_freeze_port(ap); - if (reset == softreset) { - if (did_followup_srst) - type = "follow-up soft"; - else - type = "soft"; - } else - type = "hard"; + deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); - ata_port_printk(ap, KERN_WARNING, - "%sreset failed, retrying in 5 secs\n", type); - ssleep(5); + if (reset) { + if (verbose) + ata_link_info(link, "%s resetting link\n", + reset == softreset ? "soft" : "hard"); + /* mark that this EH session started with reset */ + ehc->last_reset = jiffies; if (reset == hardreset) - sata_down_spd_limit(ap); - if (hardreset) - reset = hardreset; - goto retry; + ehc->i.flags |= ATA_EHI_DID_HARDRESET; + else + ehc->i.flags |= ATA_EHI_DID_SOFTRESET; + + rc = ata_do_reset(link, reset, classes, deadline, true); + if (rc && rc != -EAGAIN) { + failed_link = link; + goto fail; + } + + /* hardreset slave link if existent */ + if (slave && reset == hardreset) { + int tmp; + + if (verbose) + ata_link_info(slave, "hard resetting link\n"); + + ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); + tmp = ata_do_reset(slave, reset, classes, deadline, + false); + switch (tmp) { + case -EAGAIN: + rc = -EAGAIN; + case 0: + break; + default: + failed_link = slave; + rc = tmp; + goto fail; + } + } + + /* perform follow-up SRST if necessary */ + if (reset == hardreset && + ata_eh_followup_srst_needed(link, rc)) { + reset = softreset; + + if (!reset) { + ata_link_err(link, + "follow-up softreset required but no softreset available\n"); + failed_link = link; + rc = -EINVAL; + goto fail; + } + + ata_eh_about_to_do(link, NULL, ATA_EH_RESET); + rc = ata_do_reset(link, reset, classes, deadline, true); + if (rc) { + failed_link = link; + goto fail; + } + } + } else { + if (verbose) + ata_link_info(link, + "no reset method available, skipping reset\n"); + if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) + lflags |= ATA_LFLAG_ASSUME_ATA; } - if (rc == 0) { + /* + * Post-reset processing + */ + ata_for_each_dev(dev, link, ALL) { /* After the reset, the device state is PIO 0 and the - * controller state is undefined. Record the mode. 
+ * controller state is undefined. Reset also wakes up + * drives from sleeping mode. */ - for (i = 0; i < ATA_MAX_DEVICES; i++) - ap->device[i].pio_mode = XFER_PIO_0; + dev->pio_mode = XFER_PIO_0; + dev->flags &= ~ATA_DFLAG_SLEEPING; - if (postreset) - postreset(ap, classes); + if (ata_phys_link_offline(ata_dev_phys_link(dev))) + continue; - /* reset successful, schedule revalidation */ - ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); - ehc->i.action |= ATA_EH_REVALIDATE; + /* apply class override */ + if (lflags & ATA_LFLAG_ASSUME_ATA) + classes[dev->devno] = ATA_DEV_ATA; + else if (lflags & ATA_LFLAG_ASSUME_SEMB) + classes[dev->devno] = ATA_DEV_SEMB_UNSUP; + } + + /* record current link speed */ + if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) + link->sata_spd = (sstatus >> 4) & 0xf; + if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) + slave->sata_spd = (sstatus >> 4) & 0xf; + + /* thaw the port */ + if (ata_is_host_link(link)) + ata_eh_thaw_port(ap); + + /* postreset() should clear hardware SError. Although SError + * is cleared during link resume, clearing SError here is + * necessary as some PHYs raise hotplug events after SRST. + * This introduces a race condition where hotplug occurs between + * reset and here. This race is mediated by cross checking + * link onlineness and classification result later. + */ + if (postreset) { + postreset(link, classes); + if (slave) + postreset(slave, classes); + } + + /* + * Some controllers can't be frozen very well and may set spurious + * error conditions during reset. Clear accumulated error + * information and re-thaw the port if frozen. As reset is the + * final recovery action and we cross check link onlineness against + * device classification later, no hotplug event is lost by this. + */ + spin_lock_irqsave(link->ap->lock, flags); + memset(&link->eh_info, 0, sizeof(link->eh_info)); + if (slave) + memset(&slave->eh_info, 0, sizeof(link->eh_info)); + ap->pflags &= ~ATA_PFLAG_EH_PENDING; + spin_unlock_irqrestore(link->ap->lock, flags); + + if (ap->pflags & ATA_PFLAG_FROZEN) + ata_eh_thaw_port(ap); + + /* + * Make sure onlineness and classification result correspond. + * Hotplug could have happened during reset and some + * controllers fail to wait while a drive is spinning up after + * being hotplugged, causing misdetection. By cross checking + * link on/offlineness and classification result, those + * conditions can be reliably detected and retried. 
+ */ + nr_unknown = 0; + ata_for_each_dev(dev, link, ALL) { + if (ata_phys_link_online(ata_dev_phys_link(dev))) { + if (classes[dev->devno] == ATA_DEV_UNKNOWN) { + ata_dev_dbg(dev, "link online but device misclassified\n"); + classes[dev->devno] = ATA_DEV_NONE; + nr_unknown++; + } + } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { + if (ata_class_enabled(classes[dev->devno])) + ata_dev_dbg(dev, + "link offline, clearing class %d to NONE\n", + classes[dev->devno]); + classes[dev->devno] = ATA_DEV_NONE; + } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { + ata_dev_dbg(dev, + "link status unknown, clearing UNKNOWN to NONE\n"); + classes[dev->devno] = ATA_DEV_NONE; + } } + if (classify && nr_unknown) { + if (try < max_tries) { + ata_link_warn(link, + "link online but %d devices misclassified, retrying\n", + nr_unknown); + failed_link = link; + rc = -EAGAIN; + goto fail; + } + ata_link_warn(link, + "link online but %d devices misclassified, " + "device detection might fail\n", nr_unknown); + } + + /* reset successful, schedule revalidation */ + ata_eh_done(link, NULL, ATA_EH_RESET); + if (slave) + ata_eh_done(slave, NULL, ATA_EH_RESET); + ehc->last_reset = jiffies; /* update to completion time */ + ehc->i.action |= ATA_EH_REVALIDATE; + link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ + + rc = 0; + out: + /* clear hotplug flag */ + ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; + if (slave) + sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; + + spin_lock_irqsave(ap->lock, flags); + ap->pflags &= ~ATA_PFLAG_RESETTING; + spin_unlock_irqrestore(ap->lock, flags); + return rc; + + fail: + /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ + if (!ata_is_host_link(link) && + sata_scr_read(link, SCR_STATUS, &sstatus)) + rc = -ERESTART; + + if (try >= max_tries) { + /* + * Thaw host port even if reset failed, so that the port + * can be retried on the next phy event. This risks + * repeated EH runs but seems to be a better tradeoff than + * shutting down a port after a botched hotplug attempt. + */ + if (ata_is_host_link(link)) + ata_eh_thaw_port(ap); + goto out; + } + + now = jiffies; + if (time_before(now, deadline)) { + unsigned long delta = deadline - now; + + ata_link_warn(failed_link, + "reset failed (errno=%d), retrying in %u secs\n", + rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); + + ata_eh_release(ap); + while (delta) + delta = schedule_timeout_uninterruptible(delta); + ata_eh_acquire(ap); + } + + /* + * While disks spinup behind PMP, some controllers fail sending SRST. + * They need to be reset - as well as the PMP - before retrying. + */ + if (rc == -ERESTART) { + if (ata_is_host_link(link)) + ata_eh_thaw_port(ap); + goto out; + } + + if (try == max_tries - 1) { + sata_down_spd_limit(link, 0); + if (slave) + sata_down_spd_limit(slave, 0); + } else if (rc == -EPIPE) + sata_down_spd_limit(failed_link, 0); + + if (hardreset) + reset = hardreset; + goto retry; } -static int ata_eh_revalidate_and_attach(struct ata_port *ap, +static inline void ata_eh_pull_park_action(struct ata_port *ap) +{ + struct ata_link *link; + struct ata_device *dev; + unsigned long flags; + + /* + * This function can be thought of as an extended version of + * ata_eh_about_to_do() specially crafted to accommodate the + * requirements of ATA_EH_PARK handling. 
Since the EH thread + * does not leave the do {} while () loop in ata_eh_recover as + * long as the timeout for a park request to *one* device on + * the port has not expired, and since we still want to pick + * up park requests to other devices on the same port or + * timeout updates for the same device, we have to pull + * ATA_EH_PARK actions from eh_info into eh_context.i + * ourselves at the beginning of each pass over the loop. + * + * Additionally, all write accesses to &ap->park_req_pending + * through reinit_completion() (see below) or complete_all() + * (see ata_scsi_park_store()) are protected by the host lock. + * As a result we have that park_req_pending.done is zero on + * exit from this function, i.e. when ATA_EH_PARK actions for + * *all* devices on port ap have been pulled into the + * respective eh_context structs. If, and only if, + * park_req_pending.done is non-zero by the time we reach + * wait_for_completion_timeout(), another ATA_EH_PARK action + * has been scheduled for at least one of the devices on port + * ap and we have to cycle over the do {} while () loop in + * ata_eh_recover() again. + */ + + spin_lock_irqsave(ap->lock, flags); + reinit_completion(&ap->park_req_pending); + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ALL) { + struct ata_eh_info *ehi = &link->eh_info; + + link->eh_context.i.dev_action[dev->devno] |= + ehi->dev_action[dev->devno] & ATA_EH_PARK; + ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); + } + } + spin_unlock_irqrestore(ap->lock, flags); +} + +static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) +{ + struct ata_eh_context *ehc = &dev->link->eh_context; + struct ata_taskfile tf; + unsigned int err_mask; + + ata_tf_init(dev, &tf); + if (park) { + ehc->unloaded_mask |= 1 << dev->devno; + tf.command = ATA_CMD_IDLEIMMEDIATE; + tf.feature = 0x44; + tf.lbal = 0x4c; + tf.lbam = 0x4e; + tf.lbah = 0x55; + } else { + ehc->unloaded_mask &= ~(1 << dev->devno); + tf.command = ATA_CMD_CHK_POWER; + } + + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf.protocol |= ATA_PROT_NODATA; + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (park && (err_mask || tf.lbal != 0xc4)) { + ata_dev_err(dev, "head unload failed!\n"); + ehc->unloaded_mask &= ~(1 << dev->devno); + } +} + +static int ata_eh_revalidate_and_attach(struct ata_link *link, struct ata_device **r_failed_dev) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; + unsigned int new_mask = 0; unsigned long flags; - int i, rc = 0; + int rc = 0; DPRINTK("ENTER\n"); - for (i = 0; i < ATA_MAX_DEVICES; i++) { - unsigned int action; + /* For PATA drive side cable detection to work, IDENTIFY must + * be done backwards such that PDIAG- is released by the slave + * device before the master device is identified. 
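ata_eh_park_issue_cmd above parks the heads with IDLE IMMEDIATE plus an unload signature in the feature/LBA registers, and treats 0xc4 in LBA-low on completion as the drive's acknowledgement. A standalone sketch of filling and checking that signature; the reduced taskfile struct is a stand-in, while 0xe1 is the standard IDLE IMMEDIATE opcode and the register values match the code above.

    #include <stdint.h>
    #include <stdio.h>

    struct taskfile { uint8_t command, feature, lbal, lbam, lbah; };

    #define CMD_IDLEIMMEDIATE 0xe1
    #define UNLOAD_CONFIRM    0xc4  /* expected in lbal on completion */

    static void fill_park(struct taskfile *tf)
    {
        tf->command = CMD_IDLEIMMEDIATE;
        tf->feature = 0x44;     /* unload feature */
        tf->lbal = 0x4c;        /* 'L' */
        tf->lbam = 0x4e;        /* 'N' */
        tf->lbah = 0x55;        /* 'U' */
    }

    int main(void)
    {
        struct taskfile tf;

        fill_park(&tf);
        tf.lbal = UNLOAD_CONFIRM;   /* pretend the drive acknowledged */
        printf("head unload %s\n",
               tf.lbal == UNLOAD_CONFIRM ? "confirmed" : "failed");
        return 0;
    }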
+ */ + ata_for_each_dev(dev, link, ALL_REVERSE) { + unsigned int action = ata_eh_dev_action(dev); + unsigned int readid_flags = 0; - dev = &ap->device[i]; - action = ata_eh_dev_action(dev); + if (ehc->i.flags & ATA_EHI_DID_RESET) + readid_flags |= ATA_READID_POSTRESET; - if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { - if (ata_port_offline(ap)) { + if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { + WARN_ON(dev->class == ATA_DEV_PMP); + + if (ata_phys_link_offline(ata_dev_phys_link(dev))) { rc = -EIO; - break; + goto err; } - ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); - rc = ata_dev_revalidate(dev, - ehc->i.flags & ATA_EHI_DID_RESET); + ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); + rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], + readid_flags); if (rc) - break; + goto err; - ata_eh_done(ap, dev, ATA_EH_REVALIDATE); + ata_eh_done(link, dev, ATA_EH_REVALIDATE); + + /* Configuration may have changed, reconfigure + * transfer mode. + */ + ehc->i.flags |= ATA_EHI_SETMODE; /* schedule the scsi_rescan_device() here */ - queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); + schedule_work(&(ap->scsi_rescan_task)); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { + /* Temporarily set dev->class, it will be + * permanently set once all configurations are + * complete. This is necessary because new + * device configuration is done in two + * separate loops. + */ dev->class = ehc->classes[dev->devno]; - rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); - if (rc == 0) - rc = ata_dev_configure(dev, 1); - - if (rc) { - dev->class = ATA_DEV_UNKNOWN; + if (dev->class == ATA_DEV_PMP) + rc = sata_pmp_attach(dev); + else + rc = ata_dev_read_id(dev, &dev->class, + readid_flags, dev->id); + + /* read_id might have changed class, store and reset */ + ehc->classes[dev->devno] = dev->class; + dev->class = ATA_DEV_UNKNOWN; + + switch (rc) { + case 0: + /* clear error info accumulated during probe */ + ata_ering_clear(&dev->ering); + new_mask |= 1 << dev->devno; + break; + case -ENOENT: + /* IDENTIFY was issued to non-existent + * device. No need to reset. Just + * thaw and ignore the device. + */ + ata_eh_thaw_port(ap); break; + default: + goto err; } + } + } - spin_lock_irqsave(ap->lock, flags); - ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; - spin_unlock_irqrestore(ap->lock, flags); + /* PDIAG- should have been released, ask cable type if post-reset */ + if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { + if (ap->ops->cable_detect) + ap->cbl = ap->ops->cable_detect(ap); + ata_force_cbl(ap); + } + + /* Configure new devices forward such that user doesn't see + * device detection messages backwards. 
+ */ + ata_for_each_dev(dev, link, ALL) { + if (!(new_mask & (1 << dev->devno))) + continue; + + dev->class = ehc->classes[dev->devno]; + + if (dev->class == ATA_DEV_PMP) + continue; + + ehc->i.flags |= ATA_EHI_PRINTINFO; + rc = ata_dev_configure(dev); + ehc->i.flags &= ~ATA_EHI_PRINTINFO; + if (rc) { + dev->class = ATA_DEV_UNKNOWN; + goto err; } + + spin_lock_irqsave(ap->lock, flags); + ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; + spin_unlock_irqrestore(ap->lock, flags); + + /* new device discovered, configure xfermode */ + ehc->i.flags |= ATA_EHI_SETMODE; } - if (rc) - *r_failed_dev = dev; + return 0; - DPRINTK("EXIT\n"); + err: + *r_failed_dev = dev; + DPRINTK("EXIT rc=%d\n", rc); return rc; } /** - * ata_eh_suspend - handle suspend EH action - * @ap: target host port - * @r_failed_dev: result parameter to indicate failing device + * ata_set_mode - Program timings and issue SET FEATURES - XFER + * @link: link on which timings will be programmed + * @r_failed_dev: out parameter for failed device * - * Handle suspend EH action. Disk devices are spinned down and - * other types of devices are just marked suspended. Once - * suspended, no EH action to the device is allowed until it is - * resumed. + * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If + * ata_set_mode() fails, pointer to the failing device is + * returned in @r_failed_dev. * * LOCKING: - * Kernel thread context (may sleep). + * PCI/etc. bus probe sem. * * RETURNS: - * 0 on success, -errno otherwise + * 0 on success, negative errno otherwise */ -static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev) +int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { + struct ata_port *ap = link->ap; struct ata_device *dev; - int i, rc = 0; + int rc; - DPRINTK("ENTER\n"); + /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ + ata_for_each_dev(dev, link, ENABLED) { + if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { + struct ata_ering_entry *ent; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - unsigned long flags; - unsigned int action, err_mask; + ent = ata_ering_top(&dev->ering); + if (ent) + ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; + } + } - dev = &ap->device[i]; - action = ata_eh_dev_action(dev); + /* has private set_mode? */ + if (ap->ops->set_mode) + rc = ap->ops->set_mode(link, r_failed_dev); + else + rc = ata_do_set_mode(link, r_failed_dev); - if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND)) - continue; + /* if transfer mode has changed, set DUBIOUS_XFER on device */ + ata_for_each_dev(dev, link, ENABLED) { + struct ata_eh_context *ehc = &link->eh_context; + u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; + u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); - WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED); + if (dev->xfer_mode != saved_xfer_mode || + ata_ncq_enabled(dev) != saved_ncq) + dev->flags |= ATA_DFLAG_DUBIOUS_XFER; + } - ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND); + return rc; +} - if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { - /* flush cache */ - rc = ata_flush_cache(dev); - if (rc) - break; +/** + * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset + * @dev: ATAPI device to clear UA for + * + * Resets and other operations can make an ATAPI device raise + * UNIT ATTENTION which causes the next operation to fail. This + * function clears UA. + * + * LOCKING: + * EH context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. 
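Editor's note: an illustrative aside, not part of the patch. The function documented above exists because after a reset most ATAPI devices fail subsequent commands with CHECK CONDITION carrying sense key 0x06 (UNIT ATTENTION) until the condition is consumed by REQUEST SENSE. Its skeleton, with tur() and request_sense() as hypothetical stand-ins and error codes simplified:

	for (i = 0; i < MAX_TRIES; i++) {
		rc = tur(dev, &sense_key);	/* TEST UNIT READY */
		if (rc && rc != ERR_DEV)
			return -EIO;		/* transport-level failure */
		if (!rc || sense_key != 0x06)
			return 0;		/* no UNIT ATTENTION left */
		if (request_sense(dev))		/* consume one UA */
			return -EIO;
	}
	/* persistent UA: warn but let EH continue, as the code below does */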
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+	int i;
 
-		/* spin down */
-		err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
-		if (err_mask) {
-			ata_dev_printk(dev, KERN_ERR, "failed to "
-				       "spin down (err_mask=0x%x)\n",
-				       err_mask);
-			rc = -EIO;
-			break;
-		}
+	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+		u8 *sense_buffer = dev->link->ap->sector_buf;
+		u8 sense_key = 0;
+		unsigned int err_mask;
+
+		err_mask = atapi_eh_tur(dev, &sense_key);
+		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+			ata_dev_warn(dev,
+				"TEST_UNIT_READY failed (err_mask=0x%x)\n",
+				err_mask);
+			return -EIO;
 		}
 
-		spin_lock_irqsave(ap->lock, flags);
-		dev->flags |= ATA_DFLAG_SUSPENDED;
-		spin_unlock_irqrestore(ap->lock, flags);
+		if (!err_mask || sense_key != UNIT_ATTENTION)
+			return 0;
 
-		ata_eh_done(ap, dev, ATA_EH_SUSPEND);
+		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+		if (err_mask) {
+			ata_dev_warn(dev, "failed to clear "
+				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
 	}
 
-	if (rc)
-		*r_failed_dev = dev;
+	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
+		     ATA_EH_UA_TRIES);
 
-	DPRINTK("EXIT\n");
 	return 0;
 }
 
 /**
- * ata_eh_prep_resume - prep for resume EH action
- * @ap: target host port
+ * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
+ * @dev: ATA device which may need FLUSH retry
  *
- * Clear SUSPENDED in preparation for scheduled resume actions.
- * This allows other parts of EH to access the devices being
- * resumed.
+ * If @dev failed FLUSH, it needs to be reported to the upper layer
+ * immediately, as it means that @dev failed to remap and has already
+ * lost at least a sector, so further FLUSH retries won't make any
+ * difference to the lost sector.  However, if FLUSH failed for some
+ * other reason, for example a transmission error, FLUSH needs to be
+ * retried.
  *
- * LOCKING:
- * Kernel thread context (may sleep).
+ * This function determines whether FLUSH failure retry is
+ * necessary and performs it if so.
+ *
+ * RETURNS:
+ * 0 if EH can continue, -errno if EH needs to be repeated.
  */
-static void ata_eh_prep_resume(struct ata_port *ap)
+static int ata_eh_maybe_retry_flush(struct ata_device *dev)
 {
-	struct ata_device *dev;
-	unsigned long flags;
-	int i;
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_queued_cmd *qc;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+	int rc = 0;
 
-	DPRINTK("ENTER\n");
+	/* did flush fail for this device?
*/ + if (!ata_tag_valid(link->active_tag)) + return 0; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - unsigned int action; + qc = __ata_qc_from_tag(ap, link->active_tag); + if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && + qc->tf.command != ATA_CMD_FLUSH)) + return 0; - dev = &ap->device[i]; - action = ata_eh_dev_action(dev); + /* if the device failed it, it should be reported to upper layers */ + if (qc->err_mask & AC_ERR_DEV) + return 0; - if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) - continue; + /* flush failed for some other reason, give it another shot */ + ata_tf_init(dev, &tf); - spin_lock_irqsave(ap->lock, flags); - dev->flags &= ~ATA_DFLAG_SUSPENDED; - spin_unlock_irqrestore(ap->lock, flags); + tf.command = qc->tf.command; + tf.flags |= ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + + ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", + tf.command, qc->err_mask); + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (!err_mask) { + /* + * FLUSH is complete but there's no way to + * successfully complete a failed command from EH. + * Making sure retry is allowed at least once and + * retrying it should do the trick - whatever was in + * the cache is already on the platter and this won't + * cause infinite loop. + */ + qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); + } else { + ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", + err_mask); + rc = -EIO; + + /* if device failed it, report it to upper layers */ + if (err_mask & AC_ERR_DEV) { + qc->err_mask |= AC_ERR_DEV; + qc->result_tf = tf; + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + rc = 0; + } } - - DPRINTK("EXIT\n"); + return rc; } /** - * ata_eh_resume - handle resume EH action - * @ap: target host port - * @r_failed_dev: result parameter to indicate failing device + * ata_eh_set_lpm - configure SATA interface power management + * @link: link to configure power management + * @policy: the link power management policy + * @r_failed_dev: out parameter for failed device * - * Handle resume EH action. Target devices are already reset and - * revalidated. Spinning up is the only operation left. + * Enable SATA Interface power management. This will enable + * Device Interface Power Management (DIPM) for min_power + * policy, and then call driver specific callbacks for + * enabling Host Initiated Power management. * * LOCKING: - * Kernel thread context (may sleep). + * EH context. * * RETURNS: - * 0 on success, -errno otherwise + * 0 on success, -errno on failure. */ -static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev) +static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + struct ata_device **r_failed_dev) { - struct ata_device *dev; - int i, rc = 0; - - DPRINTK("ENTER\n"); + struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; + enum ata_lpm_policy old_policy = link->lpm_policy; + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; + unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; + unsigned int err_mask; + int rc; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - unsigned int action, err_mask; + /* if the link or host doesn't do LPM, noop */ + if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) + return 0; - dev = &ap->device[i]; - action = ata_eh_dev_action(dev); + /* + * DIPM is enabled only for MIN_POWER as some devices + * misbehave when the host NACKs transition to SLUMBER. 
Order + * device and link configurations such that the host always + * allows DIPM requests. + */ + ata_for_each_dev(dev, link, ENABLED) { + bool hipm = ata_id_has_hipm(dev->id); + bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; + + /* find the first enabled and LPM enabled devices */ + if (!link_dev) + link_dev = dev; + + if (!lpm_dev && (hipm || dipm)) + lpm_dev = dev; + + hints &= ~ATA_LPM_EMPTY; + if (!hipm) + hints &= ~ATA_LPM_HIPM; + + /* disable DIPM before changing link config */ + if (policy != ATA_LPM_MIN_POWER && dipm) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_DISABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_warn(dev, + "failed to disable DIPM, Emask 0x%x\n", + err_mask); + rc = -EIO; + goto fail; + } + } + } - if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) - continue; + if (ap) { + rc = ap->ops->set_lpm(link, policy, hints); + if (!rc && ap->slave_link) + rc = ap->ops->set_lpm(ap->slave_link, policy, hints); + } else + rc = sata_pmp_set_lpm(link, policy, hints); - ata_eh_about_to_do(ap, dev, ATA_EH_RESUME); + /* + * Attribute link config failure to the first (LPM) enabled + * device on the link. + */ + if (rc) { + if (rc == -EOPNOTSUPP) { + link->flags |= ATA_LFLAG_NO_LPM; + return 0; + } + dev = lpm_dev ? lpm_dev : link_dev; + goto fail; + } - if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { - err_mask = ata_do_simple_cmd(dev, - ATA_CMD_IDLEIMMEDIATE); - if (err_mask) { - ata_dev_printk(dev, KERN_ERR, "failed to " - "spin up (err_mask=0x%x)\n", - err_mask); + /* + * Low level driver acked the transition. Issue DIPM command + * with the new policy set. + */ + link->lpm_policy = policy; + if (ap && ap->slave_link) + ap->slave_link->lpm_policy = policy; + + /* host config updated, enable DIPM if transitioning to MIN_POWER */ + ata_for_each_dev(dev, link, ENABLED) { + if (policy == ATA_LPM_MIN_POWER && !no_dipm && + ata_id_has_dipm(dev->id)) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_warn(dev, + "failed to enable DIPM, Emask 0x%x\n", + err_mask); rc = -EIO; - break; + goto fail; } } - - ata_eh_done(ap, dev, ATA_EH_RESUME); } - if (rc) - *r_failed_dev = dev; - - DPRINTK("EXIT\n"); return 0; + +fail: + /* restore the old policy */ + link->lpm_policy = old_policy; + if (ap && ap->slave_link) + ap->slave_link->lpm_policy = old_policy; + + /* if no device or only one more chance is left, disable LPM */ + if (!dev || ehc->tries[dev->devno] <= 2) { + ata_link_warn(link, "disabling LPM on the link\n"); + link->flags |= ATA_LFLAG_NO_LPM; + } + if (r_failed_dev) + *r_failed_dev = dev; + return rc; } -static int ata_port_nr_enabled(struct ata_port *ap) +int ata_link_nr_enabled(struct ata_link *link) { - int i, cnt = 0; + struct ata_device *dev; + int cnt = 0; - for (i = 0; i < ATA_MAX_DEVICES; i++) - if (ata_dev_enabled(&ap->device[i])) - cnt++; + ata_for_each_dev(dev, link, ENABLED) + cnt++; return cnt; } -static int ata_port_nr_vacant(struct ata_port *ap) +static int ata_link_nr_vacant(struct ata_link *link) { - int i, cnt = 0; + struct ata_device *dev; + int cnt = 0; - for (i = 0; i < ATA_MAX_DEVICES; i++) - if (ap->device[i].class == ATA_DEV_UNKNOWN) + ata_for_each_dev(dev, link, ALL) + if (dev->class == ATA_DEV_UNKNOWN) cnt++; return cnt; } -static int ata_eh_skip_recovery(struct ata_port *ap) +static int ata_eh_skip_recovery(struct ata_link *link) { - struct ata_eh_context *ehc = &ap->eh_context; - int i; - - /* skip if all 
possible devices are suspended */
-	for (i = 0; i < ata_port_max_devices(ap); i++) {
-		struct ata_device *dev = &ap->device[i];
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev;
 
-		if (!(dev->flags & ATA_DFLAG_SUSPENDED))
-			break;
-	}
+	/* skip disabled links */
+	if (link->flags & ATA_LFLAG_DISABLED)
+		return 1;
 
-	if (i == ata_port_max_devices(ap))
+	/* skip if explicitly requested */
+	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
 		return 1;
 
-	/* thaw frozen port, resume link and recover failed devices */
-	if ((ap->pflags & ATA_PFLAG_FROZEN) ||
-	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
+	/* thaw frozen port and recover failed devices */
+	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
 		return 0;
 
-	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	/* reset at least once if reset is requested */
+	if ((ehc->i.action & ATA_EH_RESET) &&
+	    !(ehc->i.flags & ATA_EHI_DID_RESET))
+		return 0;
 
+	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
+	ata_for_each_dev(dev, link, ALL) {
 		if (dev->class == ATA_DEV_UNKNOWN &&
 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
 			return 0;
@@ -1893,6 +3562,119 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
 	return 1;
 }
 
+static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
+	u64 now = get_jiffies_64();
+	int *trials = void_arg;
+
+	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+	    (ent->timestamp < now - min(now, interval)))
+		return -1;
+
+	(*trials)++;
+	return 0;
+}
+
+static int ata_eh_schedule_probe(struct ata_device *dev)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_link *link = ata_dev_phys_link(dev);
+	int trials = 0;
+
+	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
+	    (ehc->did_probe_mask & (1 << dev->devno)))
+		return 0;
+
+	ata_eh_detach_dev(dev);
+	ata_dev_init(dev);
+	ehc->did_probe_mask |= (1 << dev->devno);
+	ehc->i.action |= ATA_EH_RESET;
+	ehc->saved_xfer_mode[dev->devno] = 0;
+	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
 
+	/* the link may be in a deep sleep, wake it up */
+	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
+		if (ata_is_host_link(link))
+			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
+					       ATA_LPM_EMPTY);
+		else
+			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
+					 ATA_LPM_EMPTY);
+	}
+
+	/* Record and count probe trials on the ering.  The specific
+	 * error mask used is irrelevant.  Because a successful device
+	 * detection clears the ering, this count accumulates only if
+	 * there are consecutive failed probes.
+	 *
+	 * If the count exceeds ATA_EH_PROBE_TRIALS within
+	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down to
+	 * 1.5Gbps.
+	 *
+	 * This is to work around cases where failed link speed
+	 * negotiation results in device misdetection leading to
+	 * infinite DEVXCHG or PHRDY CHG events.
+	 */
+	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
+	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
+
+	if (trials > ATA_EH_PROBE_TRIALS)
+		sata_down_spd_limit(link, 1);
+
+	return 1;
+}
+
+static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+
+	/* -EAGAIN from EH routine indicates retry without prejudice.
+	 * The requester is responsible for ensuring forward progress.
+ */ + if (err != -EAGAIN) + ehc->tries[dev->devno]--; + + switch (err) { + case -ENODEV: + /* device missing or wrong IDENTIFY data, schedule probing */ + ehc->i.probe_mask |= (1 << dev->devno); + case -EINVAL: + /* give it just one more chance */ + ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); + case -EIO: + if (ehc->tries[dev->devno] == 1) { + /* This is the last chance, better to slow + * down than lose it. + */ + sata_down_spd_limit(ata_dev_phys_link(dev), 0); + if (dev->pio_mode > XFER_PIO_0) + ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); + } + } + + if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { + /* disable device if it has used up all its chances */ + ata_dev_disable(dev); + + /* detach if offline */ + if (ata_phys_link_offline(ata_dev_phys_link(dev))) + ata_eh_detach_dev(dev); + + /* schedule probe if necessary */ + if (ata_eh_schedule_probe(dev)) { + ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + memset(ehc->cmd_timeout_idx[dev->devno], 0, + sizeof(ehc->cmd_timeout_idx[dev->devno])); + } + + return 1; + } else { + ehc->i.action |= ATA_EH_RESET; + return 0; + } +} + /** * ata_eh_recover - recover host port after error * @ap: host port to recover @@ -1900,12 +3682,13 @@ static int ata_eh_skip_recovery(struct ata_port *ap) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) * @postreset: postreset method (can be NULL) + * @r_failed_link: out parameter for failed link * * This is the alpha and omega, eum and yang, heart and soul of * libata exception handling. On entry, actions required to - * recover the port and hotplug requests are recorded in - * eh_context. This function executes all the operations with - * appropriate retrials and fallbacks to resurrect failed + * recover each link and hotplug requests are recorded in the + * link's eh_context. This function executes all the operations + * with appropriate retrials and fallbacks to resurrect failed * devices, detach goners and greet newcomers. * * LOCKING: @@ -1914,154 +3697,222 @@ static int ata_eh_skip_recovery(struct ata_port *ap) * RETURNS: * 0 on success, -errno on failure. */ -static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset) +int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, + ata_postreset_fn_t postreset, + struct ata_link **r_failed_link) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_link *link; struct ata_device *dev; - int down_xfermask, i, rc; + int rc, nr_fails; + unsigned long flags, deadline; DPRINTK("ENTER\n"); /* prep for recovery */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; + ata_for_each_link(link, ap, EDGE) { + struct ata_eh_context *ehc = &link->eh_context; - ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + /* re-enable link? 
*/ + if (ehc->i.action & ATA_EH_ENABLE_LINK) { + ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); + spin_lock_irqsave(ap->lock, flags); + link->flags &= ~ATA_LFLAG_DISABLED; + spin_unlock_irqrestore(ap->lock, flags); + ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); + } - /* process hotplug request */ - if (dev->flags & ATA_DFLAG_DETACH) - ata_eh_detach_dev(dev); + ata_for_each_dev(dev, link, ALL) { + if (link->flags & ATA_LFLAG_NO_RETRY) + ehc->tries[dev->devno] = 1; + else + ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; - if (!ata_dev_enabled(dev) && - ((ehc->i.probe_mask & (1 << dev->devno)) && - !(ehc->did_probe_mask & (1 << dev->devno)))) { - ata_eh_detach_dev(dev); - ata_dev_init(dev); - ehc->did_probe_mask |= (1 << dev->devno); - ehc->i.action |= ATA_EH_SOFTRESET; + /* collect port action mask recorded in dev actions */ + ehc->i.action |= ehc->i.dev_action[dev->devno] & + ~ATA_EH_PERDEV_MASK; + ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; + + /* process hotplug request */ + if (dev->flags & ATA_DFLAG_DETACH) + ata_eh_detach_dev(dev); + + /* schedule probe if necessary */ + if (!ata_dev_enabled(dev)) + ata_eh_schedule_probe(dev); } } retry: - down_xfermask = 0; rc = 0; /* if UNLOADING, finish immediately */ if (ap->pflags & ATA_PFLAG_UNLOADING) goto out; - /* prep for resume */ - ata_eh_prep_resume(ap); + /* prep for EH */ + ata_for_each_link(link, ap, EDGE) { + struct ata_eh_context *ehc = &link->eh_context; - /* skip EH if possible. */ - if (ata_eh_skip_recovery(ap)) - ehc->i.action = 0; + /* skip EH if possible. */ + if (ata_eh_skip_recovery(link)) + ehc->i.action = 0; - for (i = 0; i < ATA_MAX_DEVICES; i++) - ehc->classes[i] = ATA_DEV_UNKNOWN; + ata_for_each_dev(dev, link, ALL) + ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; + } /* reset */ - if (ehc->i.action & ATA_EH_RESET_MASK) { - ata_eh_freeze_port(ap); + ata_for_each_link(link, ap, EDGE) { + struct ata_eh_context *ehc = &link->eh_context; - rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, - softreset, hardreset, postreset); + if (!(ehc->i.action & ATA_EH_RESET)) + continue; + + rc = ata_eh_reset(link, ata_link_nr_vacant(link), + prereset, softreset, hardreset, postreset); if (rc) { - ata_port_printk(ap, KERN_ERR, - "reset failed, giving up\n"); + ata_link_err(link, "reset failed, giving up\n"); goto out; } - - ata_eh_thaw_port(ap); } - /* revalidate existing devices and attach new ones */ - rc = ata_eh_revalidate_and_attach(ap, &dev); - if (rc) - goto dev_fail; + do { + unsigned long now; - /* resume devices */ - rc = ata_eh_resume(ap, &dev); - if (rc) - goto dev_fail; + /* + * clears ATA_EH_PARK in eh_info and resets + * ap->park_req_pending + */ + ata_eh_pull_park_action(ap); + + deadline = jiffies; + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ALL) { + struct ata_eh_context *ehc = &link->eh_context; + unsigned long tmp; + + if (dev->class != ATA_DEV_ATA) + continue; + if (!(ehc->i.dev_action[dev->devno] & + ATA_EH_PARK)) + continue; + tmp = dev->unpark_deadline; + if (time_before(deadline, tmp)) + deadline = tmp; + else if (time_before_eq(tmp, jiffies)) + continue; + if (ehc->unloaded_mask & (1 << dev->devno)) + continue; + + ata_eh_park_issue_cmd(dev, 1); + } + } - /* configure transfer mode if the port has been reset */ - if (ehc->i.flags & ATA_EHI_DID_RESET) { - rc = ata_set_mode(ap, &dev); - if (rc) { - down_xfermask = 1; - goto dev_fail; + now = jiffies; + if (time_before_eq(deadline, now)) + break; + + ata_eh_release(ap); + deadline = 
wait_for_completion_timeout(&ap->park_req_pending, + deadline - now); + ata_eh_acquire(ap); + } while (deadline); + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ALL) { + if (!(link->eh_context.unloaded_mask & + (1 << dev->devno))) + continue; + + ata_eh_park_issue_cmd(dev, 0); + ata_eh_done(link, dev, ATA_EH_PARK); } } - /* suspend devices */ - rc = ata_eh_suspend(ap, &dev); - if (rc) - goto dev_fail; + /* the rest */ + nr_fails = 0; + ata_for_each_link(link, ap, PMP_FIRST) { + struct ata_eh_context *ehc = &link->eh_context; - goto out; + if (sata_pmp_attached(ap) && ata_is_host_link(link)) + goto config_lpm; - dev_fail: - switch (rc) { - case -ENODEV: - /* device missing, schedule probing */ - ehc->i.probe_mask |= (1 << dev->devno); - case -EINVAL: - ehc->tries[dev->devno] = 0; - break; - case -EIO: - sata_down_spd_limit(ap); - default: - ehc->tries[dev->devno]--; - if (down_xfermask && - ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1)) - ehc->tries[dev->devno] = 0; - } + /* revalidate existing devices and attach new ones */ + rc = ata_eh_revalidate_and_attach(link, &dev); + if (rc) + goto rest_fail; - if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { - /* disable device if it has used up all its chances */ - ata_dev_disable(dev); + /* if PMP got attached, return, pmp EH will take care of it */ + if (link->device->class == ATA_DEV_PMP) { + ehc->i.action = 0; + return 0; + } - /* detach if offline */ - if (ata_port_offline(ap)) - ata_eh_detach_dev(dev); + /* configure transfer mode if necessary */ + if (ehc->i.flags & ATA_EHI_SETMODE) { + rc = ata_set_mode(link, &dev); + if (rc) + goto rest_fail; + ehc->i.flags &= ~ATA_EHI_SETMODE; + } - /* probe if requested */ - if ((ehc->i.probe_mask & (1 << dev->devno)) && - !(ehc->did_probe_mask & (1 << dev->devno))) { - ata_eh_detach_dev(dev); - ata_dev_init(dev); + /* If reset has been issued, clear UA to avoid + * disrupting the current users of the device. + */ + if (ehc->i.flags & ATA_EHI_DID_RESET) { + ata_for_each_dev(dev, link, ALL) { + if (dev->class != ATA_DEV_ATAPI) + continue; + rc = atapi_eh_clear_ua(dev); + if (rc) + goto rest_fail; + if (zpodd_dev_enabled(dev)) + zpodd_post_poweron(dev); + } + } - ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; - ehc->did_probe_mask |= (1 << dev->devno); - ehc->i.action |= ATA_EH_SOFTRESET; + /* retry flush if necessary */ + ata_for_each_dev(dev, link, ALL) { + if (dev->class != ATA_DEV_ATA) + continue; + rc = ata_eh_maybe_retry_flush(dev); + if (rc) + goto rest_fail; } - } else { - /* soft didn't work? be haaaaard */ - if (ehc->i.flags & ATA_EHI_DID_RESET) - ehc->i.action |= ATA_EH_HARDRESET; - else - ehc->i.action |= ATA_EH_SOFTRESET; - } - if (ata_port_nr_enabled(ap)) { - ata_port_printk(ap, KERN_WARNING, "failed to recover some " - "devices, retrying in 5 secs\n"); - ssleep(5); - } else { - /* no device left, repeat fast */ - msleep(500); + config_lpm: + /* configure link power saving */ + if (link->lpm_policy != ap->target_lpm_policy) { + rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); + if (rc) + goto rest_fail; + } + + /* this link is okay now */ + ehc->i.flags = 0; + continue; + + rest_fail: + nr_fails++; + if (dev) + ata_eh_handle_dev_fail(dev, rc); + + if (ap->pflags & ATA_PFLAG_FROZEN) { + /* PMP reset requires working host port. + * Can't retry if it's frozen. 
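Editor's note: an illustrative aside, not part of the patch. The park handling earlier in this hunk reduces to a small completion-based idiom: sleep until the earliest unpark deadline, but wake immediately if another park request arrives (ata_scsi_park_store() calls complete_all() on ap->park_req_pending, per the comment further up). Stripped of the libata bookkeeping, with pending as a hypothetical struct completion shared with the requester:

	unsigned long deadline = /* earliest unpark deadline */;

	while (time_before(jiffies, deadline)) {
		/* nonzero return => complete_all() fired before timeout */
		if (wait_for_completion_timeout(&pending, deadline - jiffies))
			break;	/* new request: re-scan devices, recompute */
	}

wait_for_completion_timeout() returns the remaining jiffies (nonzero) when woken by a completion and 0 on timeout, which is exactly the loop condition needed here.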
+ */ + if (sata_pmp_attached(ap)) + goto out; + break; + } } - goto retry; + if (nr_fails) + goto retry; out: - if (rc) { - for (i = 0; i < ATA_MAX_DEVICES; i++) - ata_dev_disable(&ap->device[i]); - } + if (rc && r_failed_link) + *r_failed_link = link; DPRINTK("EXIT, rc=%d\n", rc); return rc; @@ -2077,7 +3928,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, * LOCKING: * None. */ -static void ata_eh_finish(struct ata_port *ap) +void ata_eh_finish(struct ata_port *ap) { int tag; @@ -2093,10 +3944,10 @@ static void ata_eh_finish(struct ata_port *ap) * generate sense data in this function, * considering both err_mask and tf. */ - if (qc->err_mask & AC_ERR_INVALID) - ata_eh_qc_complete(qc); - else + if (qc->flags & ATA_QCFLAG_RETRY) ata_eh_qc_retry(qc); + else + ata_eh_qc_complete(qc); } else { if (qc->flags & ATA_QCFLAG_SENSE_VALID) { ata_eh_qc_complete(qc); @@ -2107,11 +3958,16 @@ static void ata_eh_finish(struct ata_port *ap) } } } + + /* make sure nr_active_links is zero after EH */ + WARN_ON(ap->nr_active_links); + ap->nr_active_links = 0; } /** * ata_do_eh - do standard error handling * @ap: host port to handle error for + * * @prereset: prereset method (can be NULL) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) @@ -2126,13 +3982,45 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { + struct ata_device *dev; + int rc; + ata_eh_autopsy(ap); ata_eh_report(ap); - ata_eh_recover(ap, prereset, softreset, hardreset, postreset); + + rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, + NULL); + if (rc) { + ata_for_each_dev(dev, &ap->link, ALL) + ata_dev_disable(dev); + } + ata_eh_finish(ap); } /** + * ata_std_error_handler - standard error handler + * @ap: host port to handle error for + * + * Standard error handler + * + * LOCKING: + * Kernel thread context (may sleep). + */ +void ata_std_error_handler(struct ata_port *ap) +{ + struct ata_port_operations *ops = ap->ops; + ata_reset_fn_t hardreset = ops->hardreset; + + /* ignore built-in hardreset if SCR access is not available */ + if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) + hardreset = NULL; + + ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); +} + +#ifdef CONFIG_PM +/** * ata_eh_handle_port_suspend - perform port suspend operation * @ap: port to suspend * @@ -2145,11 +4033,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) { unsigned long flags; int rc = 0; + struct ata_device *dev; /* are we suspending? */ spin_lock_irqsave(ap->lock, flags); if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || - ap->pm_mesg.event == PM_EVENT_ON) { + ap->pm_mesg.event & PM_EVENT_RESUME) { spin_unlock_irqrestore(ap->lock, flags); return; } @@ -2157,26 +4046,40 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); + /* + * If we have a ZPODD attached, check its zero + * power ready status before the port is frozen. + * Only needed for runtime suspend. 
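Editor's note: an illustrative aside, not part of the patch. PMSG_IS_AUTO() distinguishes runtime (automatic) power management from system-wide suspend; the ZPODD zero-power check below is only useful for the former, since only runtime suspend is entered opportunistically while a disc may or may not be present. A sketch of the same gate in a driver suspend path, with suspend_prepare() and query_zero_power_ready() as hypothetical stand-ins:

	static int suspend_prepare(struct ata_port *ap, pm_message_t mesg)
	{
		if (PMSG_IS_AUTO(mesg)) {
			/* runtime PM: nobody is waiting, extra queries OK */
			query_zero_power_ready(ap);
		}
		return 0;
	}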
+ */ + if (PMSG_IS_AUTO(ap->pm_mesg)) { + ata_for_each_dev(dev, &ap->link, ENABLED) { + if (zpodd_dev_enabled(dev)) + zpodd_on_suspend(dev); + } + } + + /* tell ACPI we're suspending */ + rc = ata_acpi_on_suspend(ap); + if (rc) + goto out; + /* suspend */ ata_eh_freeze_port(ap); if (ap->ops->port_suspend) rc = ap->ops->port_suspend(ap, ap->pm_mesg); - /* report result */ + ata_acpi_set_state(ap, ap->pm_mesg); + out: + /* update the flags */ spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_PM_PENDING; if (rc == 0) ap->pflags |= ATA_PFLAG_SUSPENDED; - else + else if (ap->pflags & ATA_PFLAG_FROZEN) ata_port_schedule_eh(ap); - if (ap->pm_result) { - *ap->pm_result = rc; - ap->pm_result = NULL; - } - spin_unlock_irqrestore(ap->lock, flags); return; @@ -2188,62 +4091,49 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) * * Resume @ap. * - * This function also waits upto one second until all devices - * hanging off this port requests resume EH action. This is to - * prevent invoking EH and thus reset multiple times on resume. - * - * On DPM resume, where some of devices might not be resumed - * together, this may delay port resume upto one second, but such - * DPM resumes are rare and 1 sec delay isn't too bad. - * * LOCKING: * Kernel thread context (may sleep). */ static void ata_eh_handle_port_resume(struct ata_port *ap) { - unsigned long timeout; + struct ata_link *link; + struct ata_device *dev; unsigned long flags; - int i, rc = 0; + int rc = 0; /* are we resuming? */ spin_lock_irqsave(ap->lock, flags); if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || - ap->pm_mesg.event != PM_EVENT_ON) { + !(ap->pm_mesg.event & PM_EVENT_RESUME)) { spin_unlock_irqrestore(ap->lock, flags); return; } spin_unlock_irqrestore(ap->lock, flags); - /* spurious? */ - if (!(ap->pflags & ATA_PFLAG_SUSPENDED)) - goto done; + WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); - if (ap->ops->port_resume) - rc = ap->ops->port_resume(ap); + /* + * Error timestamps are in jiffies which doesn't run while + * suspended and PHY events during resume isn't too uncommon. + * When the two are combined, it can lead to unnecessary speed + * downs if the machine is suspended and resumed repeatedly. + * Clear error history. 
+ */
+	ata_for_each_link(link, ap, HOST_FIRST)
+		ata_for_each_dev(dev, link, ALL)
+			ata_ering_clear(&dev->ering);
 
-	/* give devices time to request EH */
-	timeout = jiffies + HZ; /* 1s max */
-	while (1) {
-		for (i = 0; i < ATA_MAX_DEVICES; i++) {
-			struct ata_device *dev = &ap->device[i];
-			unsigned int action = ata_eh_dev_action(dev);
+	ata_acpi_set_state(ap, ap->pm_mesg);
 
-			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
-			    !(action & ATA_EH_RESUME))
-				break;
-		}
+	if (ap->ops->port_resume)
+		rc = ap->ops->port_resume(ap);
 
-		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
-			break;
-		msleep(10);
-	}
+	/* tell ACPI that we're resuming */
+	ata_acpi_on_resume(ap);
 
- done:
+	/* update the flags */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
-	if (ap->pm_result) {
-		*ap->pm_result = rc;
-		ap->pm_result = NULL;
-	}
 	spin_unlock_irqrestore(ap->lock, flags);
 }
+#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
new file mode 100644
index 00000000000..7ccc084bf1d
--- /dev/null
+++ b/drivers/ata/libata-pmp.c
@@ -0,0 +1,1106 @@
+/*
+ * libata-pmp.c - libata port multiplier support
+ *
+ * Copyright (c) 2007  SUSE Linux Products GmbH
+ * Copyright (c) 2007  Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/libata.h>
+#include <linux/slab.h>
+#include "libata.h"
+#include "libata-transport.h"
+
+const struct ata_port_operations sata_pmp_port_ops = {
+	.inherits		= &sata_port_ops,
+	.pmp_prereset		= ata_std_prereset,
+	.pmp_hardreset		= sata_std_hardreset,
+	.pmp_postreset		= ata_std_postreset,
+	.error_handler		= sata_pmp_error_handler,
+};
+
+/**
+ * sata_pmp_read - read PMP register
+ * @link: link to read PMP register for
+ * @reg: register to read
+ * @r_val: resulting value
+ *
+ * Read PMP register.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *pmp_dev = ap->link.device;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(pmp_dev, &tf);
+	tf.command = ATA_CMD_PMP_READ;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+	tf.feature = reg;
+	tf.device = link->pmp;
+
+	err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+				     SATA_PMP_RW_TIMEOUT);
+	if (err_mask)
+		return err_mask;
+
+	*r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
+	return 0;
+}
+
+/**
+ * sata_pmp_write - write PMP register
+ * @link: link to write PMP register for
+ * @reg: register to write
+ * @val: value to write
+ *
+ * Write PMP register.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
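Editor's note: an illustrative aside, not part of the patch. sata_pmp_read() above and sata_pmp_write() below tunnel a 32-bit PMP register value through four 8-bit taskfile registers, least significant byte in nsect, then lbal/lbam/lbah. A self-contained round-trip of that packing:

	#include <assert.h>
	#include <stdint.h>

	struct tf_regs { uint8_t nsect, lbal, lbam, lbah; };

	static struct tf_regs pack32(uint32_t val)
	{
		struct tf_regs t = {
			.nsect = val & 0xff,
			.lbal  = (val >> 8) & 0xff,
			.lbam  = (val >> 16) & 0xff,
			.lbah  = (val >> 24) & 0xff,
		};
		return t;
	}

	static uint32_t unpack32(struct tf_regs t)
	{
		/* cast before shifting so bit 31 never goes through int */
		return t.nsect | t.lbal << 8 | t.lbam << 16 |
		       (uint32_t)t.lbah << 24;
	}

	int main(void)
	{
		assert(unpack32(pack32(0xdeadbeef)) == 0xdeadbeef);
		return 0;
	}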
+ */ +static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val) +{ + struct ata_port *ap = link->ap; + struct ata_device *pmp_dev = ap->link.device; + struct ata_taskfile tf; + + ata_tf_init(pmp_dev, &tf); + tf.command = ATA_CMD_PMP_WRITE; + tf.protocol = ATA_PROT_NODATA; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; + tf.feature = reg; + tf.device = link->pmp; + tf.nsect = val & 0xff; + tf.lbal = (val >> 8) & 0xff; + tf.lbam = (val >> 16) & 0xff; + tf.lbah = (val >> 24) & 0xff; + + return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0, + SATA_PMP_RW_TIMEOUT); +} + +/** + * sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP + * @qc: ATA command in question + * + * A host which has command switching PMP support cannot issue + * commands to multiple links simultaneously. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * ATA_DEFER_* if deferring is needed, 0 otherwise. + */ +int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc) +{ + struct ata_link *link = qc->dev->link; + struct ata_port *ap = link->ap; + + if (ap->excl_link == NULL || ap->excl_link == link) { + if (ap->nr_active_links == 0 || ata_link_active(link)) { + qc->flags |= ATA_QCFLAG_CLEAR_EXCL; + return ata_std_qc_defer(qc); + } + + ap->excl_link = link; + } + + return ATA_DEFER_PORT; +} + +/** + * sata_pmp_scr_read - read PSCR + * @link: ATA link to read PSCR for + * @reg: PSCR to read + * @r_val: resulting value + * + * Read PSCR @reg into @r_val for @link, to be called from + * ata_scr_read(). + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val) +{ + unsigned int err_mask; + + if (reg > SATA_PMP_PSCR_CONTROL) + return -EINVAL; + + err_mask = sata_pmp_read(link, reg, r_val); + if (err_mask) { + ata_link_warn(link, "failed to read SCR %d (Emask=0x%x)\n", + reg, err_mask); + return -EIO; + } + return 0; +} + +/** + * sata_pmp_scr_write - write PSCR + * @link: ATA link to write PSCR for + * @reg: PSCR to write + * @val: value to be written + * + * Write @val to PSCR @reg for @link, to be called from + * ata_scr_write() and ata_scr_write_flush(). + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) +{ + unsigned int err_mask; + + if (reg > SATA_PMP_PSCR_CONTROL) + return -EINVAL; + + err_mask = sata_pmp_write(link, reg, val); + if (err_mask) { + ata_link_warn(link, "failed to write SCR %d (Emask=0x%x)\n", + reg, err_mask); + return -EIO; + } + return 0; +} + +/** + * sata_pmp_set_lpm - configure LPM for a PMP link + * @link: PMP link to configure LPM for + * @policy: target LPM policy + * @hints: LPM hints + * + * Configure LPM for @link. This function will contain any PMP + * specific workarounds if necessary. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints) +{ + return sata_link_scr_lpm(link, policy, true); +} + +/** + * sata_pmp_read_gscr - read GSCR block of SATA PMP + * @dev: PMP device + * @gscr: buffer to read GSCR block into + * + * Read selected PMP GSCRs from the PMP at @dev. This will serve + * as configuration and identification info for the PMP. + * + * LOCKING: + * Kernel thread context (may sleep). 
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
+{
+	static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
+		int reg = gscr_to_read[i];
+		unsigned int err_mask;
+
+		err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
+		if (err_mask) {
+			ata_dev_err(dev, "failed to read PMP GSCR[%d] (Emask=0x%x)\n",
+				    reg, err_mask);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static const char *sata_pmp_spec_rev_str(const u32 *gscr)
+{
+	u32 rev = gscr[SATA_PMP_GSCR_REV];
+
+	if (rev & (1 << 3))
+		return "1.2";
+	if (rev & (1 << 2))
+		return "1.1";
+	if (rev & (1 << 1))
+		return "1.0";
+	return "<unknown>";
+}
+
+#define PMP_GSCR_SII_POL 129
+
+static int sata_pmp_configure(struct ata_device *dev, int print_info)
+{
+	struct ata_port *ap = dev->link->ap;
+	u32 *gscr = dev->gscr;
+	u16 vendor = sata_pmp_gscr_vendor(gscr);
+	u16 devid = sata_pmp_gscr_devid(gscr);
+	unsigned int err_mask = 0;
+	const char *reason;
+	int nr_ports, rc;
+
+	nr_ports = sata_pmp_gscr_ports(gscr);
+
+	if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
+		rc = -EINVAL;
+		reason = "invalid nr_ports";
+		goto fail;
+	}
+
+	if ((ap->flags & ATA_FLAG_AN) &&
+	    (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
+		dev->flags |= ATA_DFLAG_AN;
+
+	/* monitor SERR_PHYRDY_CHG on fan-out ports */
+	err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
+				  SERR_PHYRDY_CHG);
+	if (err_mask) {
+		rc = -EIO;
+		reason = "failed to write GSCR_ERROR_EN";
+		goto fail;
+	}
+
+	/* Disable sending Early R_OK.
+	 * With "cached read" HDD testing and multiple ports busy on a SATA
+	 * host controller, a 3x26 PMP will very rarely drop a deferred
+	 * R_OK that was intended for the host.  The symptom is that all
+	 * 5 drives under test will time out, get reset, and recover.
+	 */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		u32 reg;
+
+		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "failed to read Sil3x26 Private Register";
+			goto fail;
+		}
+		reg &= ~0x1;
+		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "failed to write Sil3x26 Private Register";
+			goto fail;
+		}
+	}
+
+	if (print_info) {
+		ata_dev_info(dev, "Port Multiplier %s, "
+			     "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+			     sata_pmp_spec_rev_str(gscr), vendor, devid,
+			     sata_pmp_gscr_rev(gscr),
+			     nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+			     gscr[SATA_PMP_GSCR_FEAT]);
+
+		if (!(dev->flags & ATA_DFLAG_AN))
+			ata_dev_info(dev,
+				"Asynchronous notification not supported, "
+				"hotplug won't work on fan-out ports. Use warm-plug instead.\n");
+	}
+
+	return 0;
+
+ fail:
+	ata_dev_err(dev,
+		    "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+		    reason, err_mask);
+	return rc;
+}
+
+static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
+{
+	struct ata_link *pmp_link = ap->pmp_link;
+	int i, err;
+
+	if (!pmp_link) {
+		pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
+				   GFP_NOIO);
+		if (!pmp_link)
+			return -ENOMEM;
+
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_link_init(ap, &pmp_link[i], i);
+
+		ap->pmp_link = pmp_link;
+
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
+			err = ata_tlink_add(&pmp_link[i]);
+			if (err)
+				goto err_tlink;
+		}
+	}
+
+	for (i = 0; i < nr_ports; i++) {
+		struct ata_link *link = &pmp_link[i];
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		link->flags = 0;
+		ehc->i.probe_mask |= ATA_ALL_DEVICES;
+		ehc->i.action |= ATA_EH_RESET;
+	}
+
+	return 0;
+ err_tlink:
+	while (--i >= 0)
+		ata_tlink_delete(&pmp_link[i]);
+	kfree(pmp_link);
+	ap->pmp_link = NULL;
+	return err;
+}
+
+static void sata_pmp_quirks(struct ata_port *ap)
+{
+	u32 *gscr = ap->link.device->gscr;
+	u16 vendor = sata_pmp_gscr_vendor(gscr);
+	u16 devid = sata_pmp_gscr_devid(gscr);
+	struct ata_link *link;
+
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* link reports offline after LPM */
+			link->flags |= ATA_LFLAG_NO_LPM;
+
+			/*
+			 * Class code report is unreliable and SRST times
+			 * out under certain configurations.
+			 */
+			if (link->pmp < 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+
+			/* port 5 is for SEMB device and it doesn't like SRST */
+			if (link->pmp == 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && devid == 0x4723) {
+		/*
+		 * sil4723 quirks
+		 *
+		 * Link reports offline after LPM.  Class code report is
+		 * unreliable.  SRST was never reliable on SIMG PMPs, and
+		 * the config device at port 2 locks up on SRST.
+		 */
+		ata_for_each_link(link, ap, EDGE)
+			link->flags |= ATA_LFLAG_NO_LPM |
+				       ATA_LFLAG_NO_SRST |
+				       ATA_LFLAG_ASSUME_ATA;
+	} else if (vendor == 0x1095 && devid == 0x4726) {
+		/* sil4726 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* link reports offline after LPM */
+			link->flags |= ATA_LFLAG_NO_LPM;
+
+			/* Class code report is unreliable and SRST
+			 * times out under certain configurations.
+			 * Config device can be at port 0 or 5 and
+			 * locks up on SRST.
+			 */
+			if (link->pmp <= 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+
+			/* Port 6 is for SEMB device which doesn't
+			 * like SRST either.
+			 */
+			if (link->pmp == 6)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
+					devid == 0x5734 || devid == 0x5744)) {
+		/* sil5723/5744 quirks */
+
+		/* sil5723/5744 has either two or three downstream
+		 * ports depending on operation mode.  The last port
+		 * is empty if an actual IO device is attached, and is
+		 * occupied by a pseudo configuration device otherwise.
+		 * Don't try hard to recover it.
+		 */
+		ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+	} else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+		/*
+		 * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+		 * 0x0325: jmicron JMB394.
+		 */
+		ata_for_each_link(link, ap, EDGE) {
+			/* SRST breaks detection and disks get misclassified.
+			 * LPM disabled to avoid potential problems.
+			 */
+			link->flags |= ATA_LFLAG_NO_LPM |
+				       ATA_LFLAG_NO_SRST |
+				       ATA_LFLAG_ASSUME_ATA;
+		}
+	}
+}
+
+/**
+ * sata_pmp_attach - attach a SATA PMP device
+ * @dev: SATA PMP device to attach
+ *
+ * Configure and attach SATA PMP device @dev.  This function is
+ * also responsible for allocating and initializing PMP links.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_pmp_attach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+	struct ata_link *tlink;
+	int rc;
+
+	/* is it hanging off the right place? */
+	if (!sata_pmp_supported(ap)) {
+		ata_dev_err(dev, "host does not support Port Multiplier\n");
+		return -EINVAL;
+	}
+
+	if (!ata_is_host_link(link)) {
+		ata_dev_err(dev, "Port Multipliers cannot be nested\n");
+		return -EINVAL;
+	}
+
+	if (dev->devno) {
+		ata_dev_err(dev, "Port Multiplier must be the first device\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(link->pmp != 0);
+	link->pmp = SATA_PMP_CTRL_PORT;
+
+	/* read GSCR block */
+	rc = sata_pmp_read_gscr(dev, dev->gscr);
+	if (rc)
+		goto fail;
+
+	/* config PMP */
+	rc = sata_pmp_configure(dev, 1);
+	if (rc)
+		goto fail;
+
+	rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
+	if (rc) {
+		ata_dev_info(dev, "failed to initialize PMP links\n");
+		goto fail;
+	}
+
+	/* attach it */
+	spin_lock_irqsave(ap->lock, flags);
+	WARN_ON(ap->nr_pmp_links);
+	ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	sata_pmp_quirks(ap);
+
+	if (ap->ops->pmp_attach)
+		ap->ops->pmp_attach(ap);
+
+	ata_for_each_link(tlink, ap, EDGE)
+		sata_link_init_spd(tlink);
+
+	return 0;
+
+ fail:
+	link->pmp = 0;
+	return rc;
+}
+
+/**
+ * sata_pmp_detach - detach a SATA PMP device
+ * @dev: SATA PMP device to detach
+ *
+ * Detach SATA PMP device @dev.  This function is also
+ * responsible for deconfiguring PMP links.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void sata_pmp_detach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_link *tlink;
+	unsigned long flags;
+
+	ata_dev_info(dev, "Port Multiplier detaching\n");
+
+	WARN_ON(!ata_is_host_link(link) || dev->devno ||
+		link->pmp != SATA_PMP_CTRL_PORT);
+
+	if (ap->ops->pmp_detach)
+		ap->ops->pmp_detach(ap);
+
+	ata_for_each_link(tlink, ap, EDGE)
+		ata_eh_detach_dev(tlink->device);
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->nr_pmp_links = 0;
+	link->pmp = 0;
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * sata_pmp_same_pmp - does the new GSCR match the configured PMP?
+ * @dev: PMP device to compare against
+ * @new_gscr: GSCR block of the new device
+ *
+ * Compare @new_gscr against @dev and determine whether @dev is
+ * the PMP described by @new_gscr.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if @dev matches @new_gscr, 0 otherwise.
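Editor's note: an illustrative aside, not part of the patch. The identity check in sata_pmp_same_pmp() below compares fields decoded from the GSCR block; as the sata_pmp_gscr_vendor()/sata_pmp_gscr_devid() accessors used throughout this file assume, the product-ID dword carries the vendor in its low half and the device ID in its high half. A standalone decode:

	#include <stdint.h>

	/* GSCR[SATA_PMP_GSCR_PROD_ID] layout, per the accessors in use */
	static void decode_prod_id(uint32_t prod,
				   uint16_t *vendor, uint16_t *devid)
	{
		*vendor = prod & 0xffff;	/* e.g. 0x1095, Silicon Image */
		*devid  = prod >> 16;		/* e.g. 0x3726 */
	}

This is also why sata_pmp_revalidate_quick() further below can get away with re-reading just the product-ID register: a swapped PMP is overwhelmingly likely to differ there.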
+ */ +static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr) +{ + const u32 *old_gscr = dev->gscr; + u16 old_vendor, new_vendor, old_devid, new_devid; + int old_nr_ports, new_nr_ports; + + old_vendor = sata_pmp_gscr_vendor(old_gscr); + new_vendor = sata_pmp_gscr_vendor(new_gscr); + old_devid = sata_pmp_gscr_devid(old_gscr); + new_devid = sata_pmp_gscr_devid(new_gscr); + old_nr_ports = sata_pmp_gscr_ports(old_gscr); + new_nr_ports = sata_pmp_gscr_ports(new_gscr); + + if (old_vendor != new_vendor) { + ata_dev_info(dev, + "Port Multiplier vendor mismatch '0x%x' != '0x%x'\n", + old_vendor, new_vendor); + return 0; + } + + if (old_devid != new_devid) { + ata_dev_info(dev, + "Port Multiplier device ID mismatch '0x%x' != '0x%x'\n", + old_devid, new_devid); + return 0; + } + + if (old_nr_ports != new_nr_ports) { + ata_dev_info(dev, + "Port Multiplier nr_ports mismatch '0x%x' != '0x%x'\n", + old_nr_ports, new_nr_ports); + return 0; + } + + return 1; +} + +/** + * sata_pmp_revalidate - revalidate SATA PMP + * @dev: PMP device to revalidate + * @new_class: new class code + * + * Re-read GSCR block and make sure @dev is still attached to the + * port and properly configured. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + u32 *gscr = (void *)ap->sector_buf; + int rc; + + DPRINTK("ENTER\n"); + + ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE); + + if (!ata_dev_enabled(dev)) { + rc = -ENODEV; + goto fail; + } + + /* wrong class? */ + if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) { + rc = -ENODEV; + goto fail; + } + + /* read GSCR */ + rc = sata_pmp_read_gscr(dev, gscr); + if (rc) + goto fail; + + /* is the pmp still there? */ + if (!sata_pmp_same_pmp(dev, gscr)) { + rc = -ENODEV; + goto fail; + } + + memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS); + + rc = sata_pmp_configure(dev, 0); + if (rc) + goto fail; + + ata_eh_done(link, NULL, ATA_EH_REVALIDATE); + + DPRINTK("EXIT, rc=0\n"); + return 0; + + fail: + ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc); + DPRINTK("EXIT, rc=%d\n", rc); + return rc; +} + +/** + * sata_pmp_revalidate_quick - revalidate SATA PMP quickly + * @dev: PMP device to revalidate + * + * Make sure the attached PMP is accessible. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int sata_pmp_revalidate_quick(struct ata_device *dev) +{ + unsigned int err_mask; + u32 prod_id; + + err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id); + if (err_mask) { + ata_dev_err(dev, + "failed to read PMP product ID (Emask=0x%x)\n", + err_mask); + return -EIO; + } + + if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) { + ata_dev_err(dev, "PMP product ID mismatch\n"); + /* something weird is going on, request full PMP recovery */ + return -EIO; + } + + return 0; +} + +/** + * sata_pmp_eh_recover_pmp - recover PMP + * @ap: ATA port PMP is attached to + * @prereset: prereset method (can be NULL) + * @softreset: softreset method + * @hardreset: hardreset method + * @postreset: postreset method (can be NULL) + * + * Recover PMP attached to @ap. Recovery procedure is somewhat + * similar to that of ata_eh_recover() except that reset should + * always be performed in hard->soft sequence and recovery + * failure results in PMP detachment. 
+ * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +static int sata_pmp_eh_recover_pmp(struct ata_port *ap, + ata_prereset_fn_t prereset, ata_reset_fn_t softreset, + ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) +{ + struct ata_link *link = &ap->link; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev = link->device; + int tries = ATA_EH_PMP_TRIES; + int detach = 0, rc = 0; + int reval_failed = 0; + + DPRINTK("ENTER\n"); + + if (dev->flags & ATA_DFLAG_DETACH) { + detach = 1; + goto fail; + } + + retry: + ehc->classes[0] = ATA_DEV_UNKNOWN; + + if (ehc->i.action & ATA_EH_RESET) { + struct ata_link *tlink; + + /* reset */ + rc = ata_eh_reset(link, 0, prereset, softreset, hardreset, + postreset); + if (rc) { + ata_link_err(link, "failed to reset PMP, giving up\n"); + goto fail; + } + + /* PMP is reset, SErrors cannot be trusted, scan all */ + ata_for_each_link(tlink, ap, EDGE) { + struct ata_eh_context *ehc = &tlink->eh_context; + + ehc->i.probe_mask |= ATA_ALL_DEVICES; + ehc->i.action |= ATA_EH_RESET; + } + } + + /* If revalidation is requested, revalidate and reconfigure; + * otherwise, do quick revalidation. + */ + if (ehc->i.action & ATA_EH_REVALIDATE) + rc = sata_pmp_revalidate(dev, ehc->classes[0]); + else + rc = sata_pmp_revalidate_quick(dev); + + if (rc) { + tries--; + + if (rc == -ENODEV) { + ehc->i.probe_mask |= ATA_ALL_DEVICES; + detach = 1; + /* give it just two more chances */ + tries = min(tries, 2); + } + + if (tries) { + /* consecutive revalidation failures? speed down */ + if (reval_failed) + sata_down_spd_limit(link, 0); + else + reval_failed = 1; + + ehc->i.action |= ATA_EH_RESET; + goto retry; + } else { + ata_dev_err(dev, + "failed to recover PMP after %d tries, giving up\n", + ATA_EH_PMP_TRIES); + goto fail; + } + } + + /* okay, PMP resurrected */ + ehc->i.flags = 0; + + DPRINTK("EXIT, rc=0\n"); + return 0; + + fail: + sata_pmp_detach(dev); + if (detach) + ata_eh_detach_dev(dev); + else + ata_dev_disable(dev); + + DPRINTK("EXIT, rc=%d\n", rc); + return rc; +} + +static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap) +{ + struct ata_link *link; + unsigned long flags; + int rc; + + spin_lock_irqsave(ap->lock, flags); + + ata_for_each_link(link, ap, EDGE) { + if (!(link->flags & ATA_LFLAG_DISABLED)) + continue; + + spin_unlock_irqrestore(ap->lock, flags); + + /* Some PMPs require hardreset sequence to get + * SError.N working. 
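Editor's note: an illustrative aside, not part of the patch. "SError.N" above refers to the DIAG.N bit of the SError register, which latches PhyRdy changes; like the rest of SError it is write-1-to-clear, which is why the code below writes SERR_PHYRDY_CHG back rather than zero. The idiom in isolation:

	u32 serror;

	if (!sata_scr_read(link, SCR_ERROR, &serror) &&
	    (serror & SERR_PHYRDY_CHG))			/* DIAG.N */
		sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); /* W1C */

Writing back only the bit being acknowledged avoids clearing other latched diagnostics that later EH passes may still want to inspect.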
+ */ + sata_link_hardreset(link, sata_deb_timing_normal, + ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK), + NULL, NULL); + + /* unconditionally clear SError.N */ + rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); + if (rc) { + ata_link_err(link, + "failed to clear SError.N (errno=%d)\n", + rc); + return rc; + } + + spin_lock_irqsave(ap->lock, flags); + } + + spin_unlock_irqrestore(ap->lock, flags); + + return 0; +} + +static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries) +{ + struct ata_port *ap = link->ap; + unsigned long flags; + + if (link_tries[link->pmp] && --link_tries[link->pmp]) + return 1; + + /* disable this link */ + if (!(link->flags & ATA_LFLAG_DISABLED)) { + ata_link_warn(link, + "failed to recover link after %d tries, disabling\n", + ATA_EH_PMP_LINK_TRIES); + + spin_lock_irqsave(ap->lock, flags); + link->flags |= ATA_LFLAG_DISABLED; + spin_unlock_irqrestore(ap->lock, flags); + } + + ata_dev_disable(link->device); + link->eh_context.i.action = 0; + + return 0; +} + +/** + * sata_pmp_eh_recover - recover PMP-enabled port + * @ap: ATA port to recover + * + * Drive EH recovery operation for PMP enabled port @ap. This + * function recovers host and PMP ports with proper retrials and + * fallbacks. Actual recovery operations are performed using + * ata_eh_recover() and sata_pmp_eh_recover_pmp(). + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +static int sata_pmp_eh_recover(struct ata_port *ap) +{ + struct ata_port_operations *ops = ap->ops; + int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; + struct ata_link *pmp_link = &ap->link; + struct ata_device *pmp_dev = pmp_link->device; + struct ata_eh_context *pmp_ehc = &pmp_link->eh_context; + u32 *gscr = pmp_dev->gscr; + struct ata_link *link; + struct ata_device *dev; + unsigned int err_mask; + u32 gscr_error, sntf; + int cnt, rc; + + pmp_tries = ATA_EH_PMP_TRIES; + ata_for_each_link(link, ap, EDGE) + link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; + + retry: + /* PMP attached? */ + if (!sata_pmp_attached(ap)) { + rc = ata_eh_recover(ap, ops->prereset, ops->softreset, + ops->hardreset, ops->postreset, NULL); + if (rc) { + ata_for_each_dev(dev, &ap->link, ALL) + ata_dev_disable(dev); + return rc; + } + + if (pmp_dev->class != ATA_DEV_PMP) + return 0; + + /* new PMP online */ + ata_for_each_link(link, ap, EDGE) + link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; + + /* fall through */ + } + + /* recover pmp */ + rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset, + ops->hardreset, ops->postreset); + if (rc) + goto pmp_fail; + + /* PHY event notification can disturb reset and other recovery + * operations. Turn it off. 
+ */ + if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) { + gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY; + + err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN, + gscr[SATA_PMP_GSCR_FEAT_EN]); + if (err_mask) { + ata_link_warn(pmp_link, + "failed to disable NOTIFY (err_mask=0x%x)\n", + err_mask); + goto pmp_fail; + } + } + + /* handle disabled links */ + rc = sata_pmp_eh_handle_disabled_links(ap); + if (rc) + goto pmp_fail; + + /* recover links */ + rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset, + ops->pmp_hardreset, ops->pmp_postreset, &link); + if (rc) + goto link_fail; + + /* clear SNotification */ + rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); + if (rc == 0) + sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); + + /* + * If LPM is active on any fan-out port, hotplug wouldn't + * work. Return w/ PHY event notification disabled. + */ + ata_for_each_link(link, ap, EDGE) + if (link->lpm_policy > ATA_LPM_MAX_POWER) + return 0; + + /* + * Connection status might have changed while resetting other + * links, enable notification and check SATA_PMP_GSCR_ERROR + * before returning. + */ + + /* enable notification */ + if (pmp_dev->flags & ATA_DFLAG_AN) { + gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY; + + err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN, + gscr[SATA_PMP_GSCR_FEAT_EN]); + if (err_mask) { + ata_dev_err(pmp_dev, + "failed to write PMP_FEAT_EN (Emask=0x%x)\n", + err_mask); + rc = -EIO; + goto pmp_fail; + } + } + + /* check GSCR_ERROR */ + err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error); + if (err_mask) { + ata_dev_err(pmp_dev, + "failed to read PMP_GSCR_ERROR (Emask=0x%x)\n", + err_mask); + rc = -EIO; + goto pmp_fail; + } + + cnt = 0; + ata_for_each_link(link, ap, EDGE) { + if (!(gscr_error & (1 << link->pmp))) + continue; + + if (sata_pmp_handle_link_fail(link, link_tries)) { + ata_ehi_hotplugged(&link->eh_context.i); + cnt++; + } else { + ata_link_warn(link, + "PHY status changed but maxed out on retries, giving up\n"); + ata_link_warn(link, + "Manually issue scan to resume this link\n"); + } + } + + if (cnt) { + ata_port_info(ap, + "PMP SError.N set for some ports, repeating recovery\n"); + goto retry; + } + + return 0; + + link_fail: + if (sata_pmp_handle_link_fail(link, link_tries)) { + pmp_ehc->i.action |= ATA_EH_RESET; + goto retry; + } + + /* fall through */ + pmp_fail: + /* Control always ends up here after detaching PMP. Shut up + * and return if we're unloading. + */ + if (ap->pflags & ATA_PFLAG_UNLOADING) + return rc; + + if (!sata_pmp_attached(ap)) + goto retry; + + if (--pmp_tries) { + pmp_ehc->i.action |= ATA_EH_RESET; + goto retry; + } + + ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n", + ATA_EH_PMP_TRIES); + sata_pmp_detach(pmp_dev); + ata_dev_disable(pmp_dev); + + return rc; +} + +/** + * sata_pmp_error_handler - do standard error handling for PMP-enabled host + * @ap: host port to handle error for + * + * Perform standard error handling sequence for PMP-enabled host + * @ap. + * + * LOCKING: + * Kernel thread context (may sleep). 
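+ *
+ *	A PMP-aware driver does not usually call this directly but hooks
+ *	it up as its ->error_handler, typically by inheriting from
+ *	sata_pmp_port_ops.  A minimal sketch (illustrative only, not
+ *	taken from any specific driver):
+ *
+ *		static struct ata_port_operations my_sata_ops = {
+ *			.inherits	= &sata_pmp_port_ops,
+ *		};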
+ */ +void sata_pmp_error_handler(struct ata_port *ap) +{ + ata_eh_autopsy(ap); + ata_eh_report(ap); + sata_pmp_eh_recover(ap); + ata_eh_finish(ap); +} + +EXPORT_SYMBOL_GPL(sata_pmp_port_ops); +EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); +EXPORT_SYMBOL_GPL(sata_pmp_error_handler); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3986ec8741b..72691fd9394 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1,7 +1,7 @@ /* * libata-scsi.c - helper library for ATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -33,9 +33,11 @@ * */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/spinlock.h> +#include <linux/export.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> @@ -45,21 +47,24 @@ #include <scsi/scsi_transport.h> #include <linux/libata.h> #include <linux/hdreg.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> +#include <linux/suspend.h> +#include <asm/unaligned.h> #include "libata.h" +#include "libata-transport.h" -#define SECTOR_SIZE 512 +#define ATA_SCSI_RBUF_SIZE 4096 -typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd); +static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); +static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE]; -static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); + +static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); -static struct ata_device * ata_scsi_find_dev(struct ata_port *ap, +static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun); - #define RW_RECOVERY_MPAGE 0x1 #define RW_RECOVERY_MPAGE_LEN 12 @@ -71,11 +76,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, #define ALL_SUB_MPAGES 0xff -static const u8 def_rw_recovery_mpage[] = { +static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = { RW_RECOVERY_MPAGE, RW_RECOVERY_MPAGE_LEN - 2, - (1 << 7) | /* AWRE, sat-r06 say it shall be 0 */ - (1 << 6), /* ARRE (auto read reallocation) */ + (1 << 7), /* AWRE */ 0, /* read retry count */ 0, 0, 0, 0, 0, /* write retry count */ @@ -100,23 +104,270 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 0, 30 /* extended self test time, see 05-359r1 */ }; -/* - * libata transport template. libata doesn't do real transport stuff. - * It just needs the eh_timed_out hook. 
- */
-struct scsi_transport_template ata_scsi_transport_template = {
-	.eh_strategy_handler	= ata_scsi_error,
-	.eh_timed_out		= ata_scsi_timed_out,
-	.user_scan		= ata_scsi_user_scan,
+static const char *ata_lpm_policy_names[] = {
+	[ATA_LPM_UNKNOWN]	= "max_performance",
+	[ATA_LPM_MAX_POWER]	= "max_performance",
+	[ATA_LPM_MED_POWER]	= "medium_power",
+	[ATA_LPM_MIN_POWER]	= "min_power",
 };
 
+static ssize_t ata_scsi_lpm_store(struct device *device,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(device);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ata_link *link;
+	struct ata_device *dev;
+	enum ata_lpm_policy policy;
+	unsigned long flags;
+
+	/* UNKNOWN is internal state, iterate from MAX_POWER */
+	for (policy = ATA_LPM_MAX_POWER;
+	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
+		const char *name = ata_lpm_policy_names[policy];
+
+		if (strncmp(name, buf, strlen(name)) == 0)
+			break;
+	}
+	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
+		return -EINVAL;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, &ap->link, ENABLED) {
+			if (dev->horkage & ATA_HORKAGE_NOLPM) {
+				count = -EOPNOTSUPP;
+				goto out_unlock;
+			}
+		}
+	}
+
+	ap->target_lpm_policy = policy;
+	ata_port_schedule_eh(ap);
+out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+	return count;
+}
+
+static ssize_t ata_scsi_lpm_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+
+	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			ata_lpm_policy_names[ap->target_lpm_policy]);
+}
+DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+	    ata_scsi_lpm_show, ata_scsi_lpm_store);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
+
+static ssize_t ata_scsi_park_show(struct device *device,
+				  struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags, now;
+	unsigned int uninitialized_var(msecs);
+	int rc = 0;
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irqsave(ap->lock, flags);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	link = dev->link;
+	now = jiffies;
+	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
+	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
+	    time_after(dev->unpark_deadline, now))
+		msecs = jiffies_to_msecs(dev->unpark_deadline - now);
+	else
+		msecs = 0;
+
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc ?
rc : snprintf(buf, 20, "%u\n", msecs); +} + +static ssize_t ata_scsi_park_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap; + struct ata_device *dev; + long int input; + unsigned long flags; + int rc; + + rc = kstrtol(buf, 10, &input); + if (rc) + return rc; + if (input < -2) + return -EINVAL; + if (input > ATA_TMOUT_MAX_PARK) { + rc = -EOVERFLOW; + input = ATA_TMOUT_MAX_PARK; + } + + ap = ata_shost_to_port(sdev->host); + + spin_lock_irqsave(ap->lock, flags); + dev = ata_scsi_find_dev(ap, sdev); + if (unlikely(!dev)) { + rc = -ENODEV; + goto unlock; + } + if (dev->class != ATA_DEV_ATA) { + rc = -EOPNOTSUPP; + goto unlock; + } + + if (input >= 0) { + if (dev->flags & ATA_DFLAG_NO_UNLOAD) { + rc = -EOPNOTSUPP; + goto unlock; + } + + dev->unpark_deadline = ata_deadline(jiffies, input); + dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; + ata_port_schedule_eh(ap); + complete(&ap->park_req_pending); + } else { + switch (input) { + case -1: + dev->flags &= ~ATA_DFLAG_NO_UNLOAD; + break; + case -2: + dev->flags |= ATA_DFLAG_NO_UNLOAD; + break; + } + } +unlock: + spin_unlock_irqrestore(ap->lock, flags); + + return rc ? rc : len; +} +DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, + ata_scsi_park_show, ata_scsi_park_store); +EXPORT_SYMBOL_GPL(dev_attr_unload_heads); -static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) +{ + cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; + + scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); +} + +static ssize_t +ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_store(ap, buf, count); + return -EINVAL; +} + +static ssize_t +ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_show(ap, buf); + return -EINVAL; +} +DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, + ata_scsi_em_message_show, ata_scsi_em_message_store); +EXPORT_SYMBOL_GPL(dev_attr_em_message); + +static ssize_t +ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + return snprintf(buf, 23, "%d\n", ap->em_message_type); +} +DEVICE_ATTR(em_message_type, S_IRUGO, + ata_scsi_em_message_type_show, NULL); +EXPORT_SYMBOL_GPL(dev_attr_em_message_type); + +static ssize_t +ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) + return ap->ops->sw_activity_show(atadev, buf); + return -EINVAL; +} + +static ssize_t +ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct 
ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + enum sw_activity val; + int rc; + + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + val = simple_strtoul(buf, NULL, 0); + switch (val) { + case OFF: case BLINK_ON: case BLINK_OFF: + rc = ap->ops->sw_activity_store(atadev, val); + if (!rc) + return count; + else + return rc; + } + } + return -EINVAL; +} +DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, + ata_scsi_activity_store); +EXPORT_SYMBOL_GPL(dev_attr_sw_activity); + +struct device_attribute *ata_common_sdev_attrs[] = { + &dev_attr_unload_heads, + NULL +}; +EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); + +static void ata_scsi_invalid_field(struct scsi_cmnd *cmd) { ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ - done(cmd); + cmd->scsi_done(cmd); } /** @@ -149,6 +400,75 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, } /** + * ata_scsi_unlock_native_capacity - unlock native capacity + * @sdev: SCSI device to adjust device capacity for + * + * This function is called if a partition on @sdev extends beyond + * the end of the device. It requests EH to unlock HPA. + * + * LOCKING: + * Defined by the SCSI layer. Might sleep. + */ +void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *dev; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + + dev = ata_scsi_find_dev(ap, sdev); + if (dev && dev->n_sectors < dev->n_native_sectors) { + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + dev->link->eh_info.action |= ATA_EH_RESET; + ata_port_schedule_eh(ap); + } + + spin_unlock_irqrestore(ap->lock, flags); + ata_port_wait_eh(ap); +} + +/** + * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl + * @ap: target port + * @sdev: SCSI device to get identify data for + * @arg: User buffer area for identify data + * + * LOCKING: + * Defined by the SCSI layer. We don't really care. + * + * RETURNS: + * Zero on success, negative errno on error. + */ +static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev, + void __user *arg) +{ + struct ata_device *dev = ata_scsi_find_dev(ap, sdev); + u16 __user *dst = arg; + char buf[40]; + + if (!dev) + return -ENOMSG; + + if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) + return -EFAULT; + + ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); + if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN)) + return -EFAULT; + + ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); + if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN)) + return -EFAULT; + + ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); + if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN)) + return -EFAULT; + + return 0; +} + +/** * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl * @scsidev: Device to which we are issuing command * @arg: User provided data for issuing command @@ -159,15 +479,14 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, * RETURNS: * Zero on success, negative errno on error. 
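 *
 *	The four-byte header follows the historical HDIO_DRIVE_CMD
 *	convention: args[0] = ATA command, args[1] = sector number,
 *	args[2] = feature, args[3] = sector count / number of 512-byte
 *	sectors of response data.  A hypothetical userspace IDENTIFY
 *	DEVICE request would look roughly like:
 *
 *		u8 args[4 + 512] = { ATA_CMD_ID_ATA, 0, 0, 1 };
 *		ioctl(fd, HDIO_DRIVE_CMD, args);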
*/ - int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) { int rc = 0; u8 scsi_cmd[MAX_COMMAND_SIZE]; - u8 args[4], *argbuf = NULL; + u8 args[4], *argbuf = NULL, *sensebuf = NULL; int argsize = 0; - struct scsi_sense_hdr sshdr; enum dma_data_direction data_dir; + int cmd_result; if (arg == NULL) return -EINVAL; @@ -175,10 +494,14 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) if (copy_from_user(args, arg, sizeof(args))) return -EFAULT; + sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); + if (!sensebuf) + return -ENOMEM; + memset(scsi_cmd, 0, sizeof(scsi_cmd)); if (args[3]) { - argsize = SECTOR_SIZE * args[3]; + argsize = ATA_SECT_SIZE * args[3]; argbuf = kmalloc(argsize, GFP_KERNEL); if (argbuf == NULL) { rc = -ENOMEM; @@ -187,18 +510,18 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) scsi_cmd[1] = (4 << 1); /* PIO Data-in */ scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, - block count in sector count field */ + block count in sector count field */ data_dir = DMA_FROM_DEVICE; } else { scsi_cmd[1] = (3 << 1); /* Non-data */ - /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */ + scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ data_dir = DMA_NONE; } scsi_cmd[0] = ATA_16; scsi_cmd[4] = args[2]; - if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */ + if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ scsi_cmd[6] = args[3]; scsi_cmd[8] = args[1]; scsi_cmd[10] = 0x4f; @@ -210,18 +533,46 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ - if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize, - &sshdr, (10*HZ), 5)) { + cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, + sensebuf, (10*HZ), 5, 0, NULL); + + if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ + u8 *desc = sensebuf + 8; + cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ + + /* If we set cc then ATA pass-through will cause a + * check condition even if no error. Filter that. */ + if (cmd_result & SAM_STAT_CHECK_CONDITION) { + struct scsi_sense_hdr sshdr; + scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, + &sshdr); + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) + cmd_result &= ~SAM_STAT_CHECK_CONDITION; + } + + /* Send userspace a few ATA registers (same as drivers/ide) */ + if (sensebuf[0] == 0x72 && /* format is "descriptor" */ + desc[0] == 0x09) { /* code is "ATA Descriptor" */ + args[0] = desc[13]; /* status */ + args[1] = desc[3]; /* error */ + args[2] = desc[5]; /* sector count (0:7) */ + if (copy_to_user(arg, args, sizeof(args))) + rc = -EFAULT; + } + } + + + if (cmd_result) { rc = -EIO; goto error; } - /* Need code to retrieve data from check condition? 
*/ - if ((argbuf) && copy_to_user(arg + sizeof(args), argbuf, argsize)) rc = -EFAULT; error: + kfree(sensebuf); kfree(argbuf); return rc; } @@ -241,8 +592,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) { int rc = 0; u8 scsi_cmd[MAX_COMMAND_SIZE]; - u8 args[7]; - struct scsi_sense_hdr sshdr; + u8 args[7], *sensebuf = NULL; + int cmd_result; if (arg == NULL) return -EINVAL; @@ -250,43 +601,109 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) if (copy_from_user(args, arg, sizeof(args))) return -EFAULT; + sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); + if (!sensebuf) + return -ENOMEM; + memset(scsi_cmd, 0, sizeof(scsi_cmd)); scsi_cmd[0] = ATA_16; scsi_cmd[1] = (3 << 1); /* Non-data */ - /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */ + scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ scsi_cmd[4] = args[1]; scsi_cmd[6] = args[2]; scsi_cmd[8] = args[3]; scsi_cmd[10] = args[4]; scsi_cmd[12] = args[5]; + scsi_cmd[13] = args[6] & 0x4f; scsi_cmd[14] = args[0]; /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ - if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr, - (10*HZ), 5)) + cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, + sensebuf, (10*HZ), 5, 0, NULL); + + if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ + u8 *desc = sensebuf + 8; + cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ + + /* If we set cc then ATA pass-through will cause a + * check condition even if no error. Filter that. */ + if (cmd_result & SAM_STAT_CHECK_CONDITION) { + struct scsi_sense_hdr sshdr; + scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, + &sshdr); + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) + cmd_result &= ~SAM_STAT_CHECK_CONDITION; + } + + /* Send userspace ATA registers */ + if (sensebuf[0] == 0x72 && /* format is "descriptor" */ + desc[0] == 0x09) {/* code is "ATA Descriptor" */ + args[0] = desc[13]; /* status */ + args[1] = desc[3]; /* error */ + args[2] = desc[5]; /* sector count (0:7) */ + args[3] = desc[7]; /* lbal */ + args[4] = desc[9]; /* lbam */ + args[5] = desc[11]; /* lbah */ + args[6] = desc[12]; /* select */ + if (copy_to_user(arg, args, sizeof(args))) + rc = -EFAULT; + } + } + + if (cmd_result) { rc = -EIO; + goto error; + } - /* Need code to retrieve data from check condition? 
*/ + error: + kfree(sensebuf); return rc; } -int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +static int ata_ioc32(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_PIO_DMA) + return 1; + if (ap->pflags & ATA_PFLAG_PIO32) + return 1; + return 0; +} + +int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) { int val = -EINVAL, rc = -EINVAL; + unsigned long flags; switch (cmd) { case ATA_IOC_GET_IO32: - val = 0; + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); if (copy_to_user(arg, &val, 1)) return -EFAULT; return 0; case ATA_IOC_SET_IO32: val = (unsigned long) arg; - if (val != 0) - return -EINVAL; - return 0; + rc = 0; + spin_lock_irqsave(ap->lock, flags); + if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { + if (val) + ap->pflags |= ATA_PFLAG_PIO32; + else + ap->pflags &= ~ATA_PFLAG_PIO32; + } else { + if (val != ata_ioc32(ap)) + rc = -EINVAL; + } + spin_unlock_irqrestore(ap->lock, flags); + return rc; + + case HDIO_GET_IDENTITY: + return ata_get_identity(ap, scsidev, arg); case HDIO_DRIVE_CMD: if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) @@ -305,12 +722,19 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) return rc; } +EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl); + +int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +{ + return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), + scsidev, cmd, arg); +} +EXPORT_SYMBOL_GPL(ata_scsi_ioctl); /** * ata_scsi_qc_new - acquire new ata_queued_cmd reference * @dev: ATA device to which the new command is attached * @cmd: SCSI command that originated this ATA command - * @done: SCSI command completion function * * Obtain a reference to an unused ata_queued_cmd structure, * which is the basic libata structure representing a single @@ -326,32 +750,34 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) * RETURNS: * Command allocated, or %NULL if none available. 
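 *
 *	Callers follow the pattern used by ata_scsi_translate() below;
 *	a condensed sketch:
 *
 *		qc = ata_scsi_qc_new(dev, cmd);
 *		if (!qc)
 *			return 0;	/* cmd was completed for us */
 *		...
 *		ata_qc_issue(qc);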
*/ -struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, - struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, + struct scsi_cmnd *cmd) { struct ata_queued_cmd *qc; qc = ata_qc_new_init(dev); if (qc) { qc->scsicmd = cmd; - qc->scsidone = done; + qc->scsidone = cmd->scsi_done; - if (cmd->use_sg) { - qc->__sg = (struct scatterlist *) cmd->request_buffer; - qc->n_elem = cmd->use_sg; - } else { - qc->__sg = &qc->sgent; - qc->n_elem = 1; - } + qc->sg = scsi_sglist(cmd); + qc->n_elem = scsi_sg_count(cmd); } else { cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); - done(cmd); + cmd->scsi_done(cmd); } return qc; } +static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *scmd = qc->scsicmd; + + qc->extrabytes = scmd->request->extra_len; + qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; +} + /** * ata_dump_status - user friendly display of error info * @id: id of the port in question @@ -364,7 +790,7 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, * LOCKING: * inherited from caller */ -void ata_dump_status(unsigned id, struct ata_taskfile *tf) +static void ata_dump_status(unsigned id, struct ata_taskfile *tf) { u8 stat = tf->command, err = tf->feature; @@ -398,131 +824,6 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf) } /** - * ata_scsi_device_suspend - suspend ATA device associated with sdev - * @sdev: the SCSI device to suspend - * @mesg: target power management message - * - * Request suspend EH action on the ATA device associated with - * @sdev and wait for the operation to complete. - * - * LOCKING: - * Kernel thread context (may sleep). - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg) -{ - struct ata_port *ap = ata_shost_to_port(sdev->host); - struct ata_device *dev = ata_scsi_find_dev(ap, sdev); - unsigned long flags; - unsigned int action; - int rc = 0; - - if (!dev) - goto out; - - spin_lock_irqsave(ap->lock, flags); - - /* wait for the previous resume to complete */ - while (dev->flags & ATA_DFLAG_SUSPENDED) { - spin_unlock_irqrestore(ap->lock, flags); - ata_port_wait_eh(ap); - spin_lock_irqsave(ap->lock, flags); - } - - /* if @sdev is already detached, nothing to do */ - if (sdev->sdev_state == SDEV_OFFLINE || - sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) - goto out_unlock; - - /* request suspend */ - action = ATA_EH_SUSPEND; - if (mesg.event != PM_EVENT_SUSPEND) - action |= ATA_EH_PM_FREEZE; - ap->eh_info.dev_action[dev->devno] |= action; - ap->eh_info.flags |= ATA_EHI_QUIET; - ata_port_schedule_eh(ap); - - spin_unlock_irqrestore(ap->lock, flags); - - /* wait for EH to do the job */ - ata_port_wait_eh(ap); - - spin_lock_irqsave(ap->lock, flags); - - /* If @sdev is still attached but the associated ATA device - * isn't suspended, the operation failed. - */ - if (sdev->sdev_state != SDEV_OFFLINE && - sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL && - !(dev->flags & ATA_DFLAG_SUSPENDED)) - rc = -EIO; - - out_unlock: - spin_unlock_irqrestore(ap->lock, flags); - out: - if (rc == 0) - sdev->sdev_gendev.power.power_state = mesg; - return rc; -} - -/** - * ata_scsi_device_resume - resume ATA device associated with sdev - * @sdev: the SCSI device to resume - * - * Request resume EH action on the ATA device associated with - * @sdev and return immediately. This enables parallel - * wakeup/spinup of devices. 
- * - * LOCKING: - * Kernel thread context (may sleep). - * - * RETURNS: - * 0. - */ -int ata_scsi_device_resume(struct scsi_device *sdev) -{ - struct ata_port *ap = ata_shost_to_port(sdev->host); - struct ata_device *dev = ata_scsi_find_dev(ap, sdev); - struct ata_eh_info *ehi = &ap->eh_info; - unsigned long flags; - unsigned int action; - - if (!dev) - goto out; - - spin_lock_irqsave(ap->lock, flags); - - /* if @sdev is already detached, nothing to do */ - if (sdev->sdev_state == SDEV_OFFLINE || - sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) - goto out_unlock; - - /* request resume */ - action = ATA_EH_RESUME; - if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND) - __ata_ehi_hotplugged(ehi); - else - action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET; - ehi->dev_action[dev->devno] |= action; - - /* We don't want autopsy and verbose EH messages. Disable - * those if we're the only device on this link. - */ - if (ata_port_max_devices(ap) == 1) - ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; - - ata_port_schedule_eh(ap); - - out_unlock: - spin_unlock_irqrestore(ap->lock, flags); - out: - sdev->sdev_gendev.power.power_state = PMSG_ON; - return 0; -} - -/** * ata_to_sense_error - convert ATA error to SCSI error * @id: ATA device number * @drv_stat: value contained in ATA status register @@ -539,8 +840,8 @@ int ata_scsi_device_resume(struct scsi_device *sdev) * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, - u8 *ascq, int verbose) +static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, + u8 *asc, u8 *ascq, int verbose) { int i; @@ -561,25 +862,24 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, /* Bad address mark */ {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field /* TRK0 */ - {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error - /* Abort & !ICRC */ - {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command + {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error + /* Abort: 0x04 is not translated here, see below */ /* Media change request */ {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline - /* SRV */ - {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found - /* Media change */ - {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline + /* SRV/IDNF */ + {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, // ID not found Logical address out of range + /* MC */ + {0x20, UNIT_ATTENTION, 0x28, 0x00}, // Media Changed Not ready to ready change, medium may have changed /* ECC */ {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error /* BBD - block marked bad */ - {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error + {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error {0xFF, 0xFF, 0xFF, 0xFF}, // END mark }; static const unsigned char stat_table[][4] = { /* Must be first because BUSY means no other bits valid */ {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now - {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault + {0x20, HARDWARE_ERROR, 0x44, 0x00}, // Device fault, internal target failure {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered {0xFF, 0xFF, 
0xFF, 0xFF}, // END mark @@ -604,13 +904,13 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, goto translate_done; } } - /* No immediate match */ - if (verbose) - printk(KERN_WARNING "ata%u: no sense translation for " - "error 0x%02x\n", id, drv_err); } - /* Fall back to interpreting status bits */ + /* + * Fall back to interpreting status bits. Note that if the drv_err + * has only the ABRT bit set, we decode drv_stat. ABRT by itself + * is not descriptive enough. + */ for (i = 0; stat_table[i][0] != 0xFF; i++) { if (stat_table[i][0] & drv_stat) { *sk = stat_table[i][1]; @@ -619,13 +919,11 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, goto translate_done; } } - /* No error? Undecoded? */ - if (verbose) - printk(KERN_WARNING "ata%u: no sense translation for " - "status: 0x%02x\n", id, drv_stat); - /* We need a sensible error return here, which is tricky, and one - that won't cause people to do things like return a disk wrongly */ + /* + * We need a sensible error return here, which is tricky, and one + * that won't cause people to do things like return a disk wrongly. + */ *sk = ABORTED_COMMAND; *asc = 0x00; *ascq = 0x00; @@ -639,19 +937,23 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, } /* - * ata_gen_ata_desc_sense - Generate check condition sense block. + * ata_gen_passthru_sense - Generate check condition sense block. * @qc: Command that completed. * * This function is specific to the ATA descriptor format sense * block specified for the ATA pass through commands. Regardless * of whether the command errored or not, return a sense * block. Copy all controller registers into the sense - * block. Clear sense key, ASC & ASCQ if there is no error. + * block. If there was no error, we get the request from an ATA + * passthrough command, so we use the following sense data: + * sk = RECOVERED ERROR + * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE + * * * LOCKING: - * spin_lock_irqsave(host lock) + * None. */ -void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) +static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) { struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; @@ -669,9 +971,13 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) */ if (qc->err_mask || tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { - ata_to_sense_error(qc->ap->id, tf->command, tf->feature, + ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, &sb[1], &sb[2], &sb[3], verbose); sb[1] &= 0x0f; + } else { + sb[1] = RECOVERED_ERROR; + sb[2] = 0; + sb[3] = 0x1D; } /* @@ -681,12 +987,9 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) desc[0] = 0x09; - /* - * Set length of additional sense data. - * Since we only populate descriptor 0, the total - * length is the same (fixed) length as descriptor 0. - */ - desc[1] = sb[7] = 14; + /* set length of additional sense data */ + sb[7] = 14; + desc[1] = 12; /* * Copy registers into sense buffer. @@ -714,92 +1017,148 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) } /** - * ata_gen_fixed_sense - generate a SCSI fixed sense block + * ata_gen_ata_sense - generate a SCSI fixed sense block * @qc: Command that we are erroring out * - * Leverage ata_to_sense_error() to give us the codes. Fit our - * LBA in here if there's room. + * Generate sense block for a failed ATA command @qc. Descriptor + * format is used to accommodate LBA48 block address. * * LOCKING: - * inherited from caller + * None. 
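+ *
+ *	The layout produced by the body below is, in outline:
+ *
+ *		sb[0]    = 0x72;	current error, descriptor format
+ *		sb[7]    = 12;		additional sense length
+ *		desc[0]  = 0x00;	information sense data descriptor
+ *		desc[6..11]		failed LBA, 48 bits, MSB first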
 */
-void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
+static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
+	unsigned char *desc = sb + 8;
 	int verbose = qc->ap->ops->error_handler == NULL;
+	u64 block;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Use ata_to_sense_error() to map status register bits
+	/* sense data is current and format is descriptor */
+	sb[0] = 0x72;
+
+	/* Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
-		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[2], &sb[12], &sb[13], verbose);
-		sb[2] &= 0x0f;
+		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+				   &sb[1], &sb[2], &sb[3], verbose);
+		sb[1] &= 0x0f;
 	}
 
-	sb[0] = 0x70;
-	sb[7] = 0x0a;
-
-	if (tf->flags & ATA_TFLAG_LBA48) {
-		/* TODO: find solution for LBA48 descriptors */
-	}
+	block = ata_tf_read_block(&qc->result_tf, dev);
 
-	else if (tf->flags & ATA_TFLAG_LBA) {
-		/* A small (28b) LBA will fit in the 32b info field */
-		sb[0] |= 0x80;		/* set valid bit */
-		sb[3] = tf->device & 0x0f;
-		sb[4] = tf->lbah;
-		sb[5] = tf->lbam;
-		sb[6] = tf->lbal;
-	}
+	/* information sense data descriptor */
+	sb[7] = 12;
+	desc[0] = 0x00;
+	desc[1] = 10;
 
-	else {
-		/* TODO: C/H/S */
-	}
+	desc[2] |= 0x80;	/* valid */
+	desc[6] = block >> 40;
+	desc[7] = block >> 32;
+	desc[8] = block >> 24;
+	desc[9] = block >> 16;
+	desc[10] = block >> 8;
+	desc[11] = block;
 }
 
 static void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
+	sdev->no_report_opcodes = 1;
+	sdev->no_write_same = 1;
+
+	/* Schedule policy is determined by ->qc_defer() callback and
+	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
+	 * prevent SCSI midlayer from automatically deferring
+	 * requests.
+	 */
+	sdev->max_device_blocked = 1;
 }
 
-static void ata_scsi_dev_config(struct scsi_device *sdev,
-				struct ata_device *dev)
+/**
+ *	atapi_drain_needed - Check whether data transfer may overflow
+ *	@rq: request to be checked
+ *
+ *	ATAPI commands which transfer variable length data to host
+ *	might overflow due to application error or hardware bug.  This
+ *	function checks whether overflow should be drained and ignored
+ *	for @rq.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if @rq needs draining; otherwise, 0.
+ */
+static int atapi_drain_needed(struct request *rq)
 {
-	unsigned int max_sectors;
+	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
+		return 0;
 
-	/* TODO: 2048 is an arbitrary number, not the
-	 * hardware maximum.  This should be increased to
-	 * 65534 when Jens Axboe's patch for dynamically
-	 * determining max_sectors is merged.
- */ - max_sectors = ATA_MAX_SECTORS; - if (dev->flags & ATA_DFLAG_LBA48) - max_sectors = ATA_MAX_SECTORS_LBA48; - if (dev->max_sectors) - max_sectors = dev->max_sectors; + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) + return 0; - blk_queue_max_sectors(sdev->request_queue, max_sectors); + return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; +} + +static int ata_scsi_dev_config(struct scsi_device *sdev, + struct ata_device *dev) +{ + struct request_queue *q = sdev->request_queue; + + if (!ata_id_has_unload(dev->id)) + dev->flags |= ATA_DFLAG_NO_UNLOAD; + + /* configure max sectors */ + blk_queue_max_hw_sectors(q, dev->max_sectors); - /* - * SATA DMA transfers must be multiples of 4 byte, so - * we need to pad ATAPI transfers using an extra sg. - * Decrement max hw segments accordingly. - */ if (dev->class == ATA_DEV_ATAPI) { - request_queue_t *q = sdev->request_queue; - blk_queue_max_hw_segments(q, q->max_hw_segments - 1); + void *buf; + + sdev->sector_size = ATA_SECT_SIZE; + + /* set DMA padding */ + blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); + + /* configure draining */ + buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); + if (!buf) { + ata_dev_err(dev, "drain buffer allocation failed\n"); + return -ENOMEM; + } + + blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); + } else { + sdev->sector_size = ata_id_logical_sector_size(dev->id); + sdev->manage_start_stop = 1; } + /* + * ata_pio_sectors() expects buffer for each sector to not cross + * page boundary. Enforce it by requiring buffers to be sector + * aligned, which works iff sector_size is not larger than + * PAGE_SIZE. ATAPI devices also need the alignment as + * IDENTIFY_PACKET is executed as ATA_PROT_PIO. + */ + if (sdev->sector_size > PAGE_SIZE) + ata_dev_warn(dev, + "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", + sdev->sector_size); + + blk_queue_update_dma_alignment(q, sdev->sector_size - 1); + + if (dev->flags & ATA_DFLAG_AN) + set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); + if (dev->flags & ATA_DFLAG_NCQ) { int depth; @@ -807,6 +1166,11 @@ static void ata_scsi_dev_config(struct scsi_device *sdev, depth = min(ATA_MAX_QUEUE - 1, depth); scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); } + + blk_queue_flush_queueable(q, false); + + dev->sdev = sdev; + return 0; } /** @@ -825,15 +1189,14 @@ int ata_scsi_slave_config(struct scsi_device *sdev) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); + int rc = 0; ata_scsi_sdev_config(sdev); - blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD); - if (dev) - ata_scsi_dev_config(sdev, dev); + rc = ata_scsi_dev_config(sdev, dev); - return 0; /* scsi layer doesn't check return value, sigh */ + return rc; } /** @@ -853,6 +1216,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev) void ata_scsi_slave_destroy(struct scsi_device *sdev) { struct ata_port *ap = ata_shost_to_port(sdev->host); + struct request_queue *q = sdev->request_queue; unsigned long flags; struct ata_device *dev; @@ -868,49 +1232,87 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) ata_port_schedule_eh(ap); } spin_unlock_irqrestore(ap->lock, flags); + + kfree(q->dma_drain_buffer); + q->dma_drain_buffer = NULL; + q->dma_drain_size = 0; } /** - * ata_scsi_change_queue_depth - SCSI callback for queue depth config + * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth + * @ap: ATA port to which the device change the queue depth * @sdev: SCSI device to configure queue depth for * 
@queue_depth: new queue depth + * @reason: calling context * - * This is libata standard hostt->change_queue_depth callback. - * SCSI will call into this callback when user tries to set queue - * depth via sysfs. - * - * LOCKING: - * SCSI layer (we don't care) + * libsas and libata have different approaches for associating a sdev to + * its ata_port. * - * RETURNS: - * Newly configured queue depth. */ -int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) +int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, + int queue_depth, int reason) { - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; - int max_depth; + unsigned long flags; - if (queue_depth < 1) + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + + if (queue_depth < 1 || queue_depth == sdev->queue_depth) return sdev->queue_depth; dev = ata_scsi_find_dev(ap, sdev); if (!dev || !ata_dev_enabled(dev)) return sdev->queue_depth; - max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); - max_depth = min(ATA_MAX_QUEUE - 1, max_depth); - if (queue_depth > max_depth) - queue_depth = max_depth; + /* NCQ enabled? */ + spin_lock_irqsave(ap->lock, flags); + dev->flags &= ~ATA_DFLAG_NCQ_OFF; + if (queue_depth == 1 || !ata_ncq_enabled(dev)) { + dev->flags |= ATA_DFLAG_NCQ_OFF; + queue_depth = 1; + } + spin_unlock_irqrestore(ap->lock, flags); + + /* limit and apply queue depth */ + queue_depth = min(queue_depth, sdev->host->can_queue); + queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); + queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1); + + if (sdev->queue_depth == queue_depth) + return -EINVAL; scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth); return queue_depth; } /** + * ata_scsi_change_queue_depth - SCSI callback for queue depth config + * @sdev: SCSI device to configure queue depth for + * @queue_depth: new queue depth + * @reason: calling context + * + * This is libata standard hostt->change_queue_depth callback. + * SCSI will call into this callback when user tries to set queue + * depth via sysfs. + * + * LOCKING: + * SCSI layer (we don't care) + * + * RETURNS: + * Newly configured queue depth. + */ +int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + + return __ata_change_queue_depth(ap, sdev, queue_depth, reason); +} + +/** * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command * @qc: Storage for translated ATA taskfile - * @scsicmd: SCSI command to translate * * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY * (to start). Perhaps these commands should be preceded by @@ -923,22 +1325,26 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) * RETURNS: * Zero on success, non-zero on error. 
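+ *
+ *	For example, CDB 1b 00 00 00 01 00 (START STOP UNIT, START=1)
+ *	is translated to READ VERIFY of one sector at address zero to
+ *	spin the drive up, while START=0 becomes STANDBY IMMEDIATE;
+ *	LOEJ and the power-condition field are rejected as invalid.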
*/ - -static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, - const u8 *scsicmd) +static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) { + struct scsi_cmnd *scmd = qc->scsicmd; struct ata_taskfile *tf = &qc->tf; + const u8 *cdb = scmd->cmnd; + + if (scmd->cmd_len < 5) + goto invalid_fld; tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; tf->protocol = ATA_PROT_NODATA; - if (scsicmd[1] & 0x1) { + if (cdb[1] & 0x1) { ; /* ignore IMMED bit, violates sat-r05 */ } - if (scsicmd[4] & 0x2) + if (cdb[4] & 0x2) goto invalid_fld; /* LOEJ bit set not supported */ - if (((scsicmd[4] >> 4) & 0xf) != 0) + if (((cdb[4] >> 4) & 0xf) != 0) goto invalid_fld; /* power conditions not supported */ - if (scsicmd[4] & 0x1) { + + if (cdb[4] & 0x1) { tf->nsect = 1; /* 1 sector, lba=0 */ if (qc->dev->flags & ATA_DFLAG_LBA) { @@ -957,10 +1363,21 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ } else { - tf->nsect = 0; /* time period value (0 implies now) */ - tf->command = ATA_CMD_STANDBY; - /* Consider: ATA STANDBY IMMEDIATE command */ + /* Some odd clown BIOSen issue spindown on power off (ACPI S4 + * or S5) causing some drives to spin up and down again. + */ + if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && + system_state == SYSTEM_POWER_OFF) + goto skip; + + if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && + system_entering_hibernation()) + goto skip; + + /* Issue ATA STANDBY IMMEDIATE command */ + tf->command = ATA_CMD_STANDBYNOW1; } + /* * Standby and Idle condition timers could be implemented but that * would require libata to implement the Power condition mode page @@ -970,17 +1387,19 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, return 0; -invalid_fld: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + invalid_fld: + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ return 1; + skip: + scmd->result = SAM_STAT_GOOD; + return 1; } /** * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command * @qc: Storage for translated ATA taskfile - * @scsicmd: SCSI command to translate (ignored) * * Sets up an ATA taskfile to issue FLUSH CACHE or * FLUSH CACHE EXT. @@ -991,26 +1410,27 @@ invalid_fld: * RETURNS: * Zero on success, non-zero on error. */ - -static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) +static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; tf->flags |= ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - if ((qc->dev->flags & ATA_DFLAG_LBA48) && - (ata_id_has_flush_ext(qc->dev->id))) + if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) tf->command = ATA_CMD_FLUSH_EXT; else tf->command = ATA_CMD_FLUSH; + /* flush is critical for IO integrity, consider it an IO command */ + qc->flags |= ATA_QCFLAG_IO; + return 0; } /** * scsi_6_lba_len - Get LBA and transfer length - * @scsicmd: SCSI command to translate + * @cdb: SCSI command to translate * * Calculate LBA and transfer length for 6-byte commands. 
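+ *
+ *	Worked example (illustrative): for a READ(6) CDB of
+ *	08 01 02 03 00 xx, *plba becomes 0x010203 and *plen 0;
+ *	the r/w translation below treats a length of 0 as 256 blocks.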
* @@ -1018,18 +1438,18 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scs * @plba: the LBA * @plen: the transfer length */ - -static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen) { u64 lba = 0; - u32 len = 0; + u32 len; VPRINTK("six-byte command\n"); - lba |= ((u64)scsicmd[2]) << 8; - lba |= ((u64)scsicmd[3]); + lba |= ((u64)(cdb[1] & 0x1f)) << 16; + lba |= ((u64)cdb[2]) << 8; + lba |= ((u64)cdb[3]); - len |= ((u32)scsicmd[4]); + len = cdb[4]; *plba = lba; *plen = len; @@ -1037,7 +1457,7 @@ static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) /** * scsi_10_lba_len - Get LBA and transfer length - * @scsicmd: SCSI command to translate + * @cdb: SCSI command to translate * * Calculate LBA and transfer length for 10-byte commands. * @@ -1045,21 +1465,20 @@ static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) * @plba: the LBA * @plen: the transfer length */ - -static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen) { u64 lba = 0; u32 len = 0; VPRINTK("ten-byte command\n"); - lba |= ((u64)scsicmd[2]) << 24; - lba |= ((u64)scsicmd[3]) << 16; - lba |= ((u64)scsicmd[4]) << 8; - lba |= ((u64)scsicmd[5]); + lba |= ((u64)cdb[2]) << 24; + lba |= ((u64)cdb[3]) << 16; + lba |= ((u64)cdb[4]) << 8; + lba |= ((u64)cdb[5]); - len |= ((u32)scsicmd[7]) << 8; - len |= ((u32)scsicmd[8]); + len |= ((u32)cdb[7]) << 8; + len |= ((u32)cdb[8]); *plba = lba; *plen = len; @@ -1067,7 +1486,7 @@ static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) /** * scsi_16_lba_len - Get LBA and transfer length - * @scsicmd: SCSI command to translate + * @cdb: SCSI command to translate * * Calculate LBA and transfer length for 16-byte commands. * @@ -1075,27 +1494,26 @@ static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) * @plba: the LBA * @plen: the transfer length */ - -static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen) { u64 lba = 0; u32 len = 0; VPRINTK("sixteen-byte command\n"); - lba |= ((u64)scsicmd[2]) << 56; - lba |= ((u64)scsicmd[3]) << 48; - lba |= ((u64)scsicmd[4]) << 40; - lba |= ((u64)scsicmd[5]) << 32; - lba |= ((u64)scsicmd[6]) << 24; - lba |= ((u64)scsicmd[7]) << 16; - lba |= ((u64)scsicmd[8]) << 8; - lba |= ((u64)scsicmd[9]); + lba |= ((u64)cdb[2]) << 56; + lba |= ((u64)cdb[3]) << 48; + lba |= ((u64)cdb[4]) << 40; + lba |= ((u64)cdb[5]) << 32; + lba |= ((u64)cdb[6]) << 24; + lba |= ((u64)cdb[7]) << 16; + lba |= ((u64)cdb[8]) << 8; + lba |= ((u64)cdb[9]); - len |= ((u32)scsicmd[10]) << 24; - len |= ((u32)scsicmd[11]) << 16; - len |= ((u32)scsicmd[12]) << 8; - len |= ((u32)scsicmd[13]); + len |= ((u32)cdb[10]) << 24; + len |= ((u32)cdb[11]) << 16; + len |= ((u32)cdb[12]) << 8; + len |= ((u32)cdb[13]); *plba = lba; *plen = len; @@ -1104,7 +1522,6 @@ static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) /** * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one * @qc: Storage for translated ATA taskfile - * @scsicmd: SCSI command to translate * * Converts SCSI VERIFY command to an ATA READ VERIFY command. * @@ -1114,23 +1531,28 @@ static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) * RETURNS: * Zero on success, non-zero on error. 
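+ *
+ *	Both the 10- and 16-byte forms are accepted; a zero-length
+ *	VERIFY is completed immediately with SAM_STAT_GOOD via the
+ *	nothing_to_do path below.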
*/ - -static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) +static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) { + struct scsi_cmnd *scmd = qc->scsicmd; struct ata_taskfile *tf = &qc->tf; struct ata_device *dev = qc->dev; u64 dev_sectors = qc->dev->n_sectors; + const u8 *cdb = scmd->cmnd; u64 block; u32 n_block; tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - if (scsicmd[0] == VERIFY) - scsi_10_lba_len(scsicmd, &block, &n_block); - else if (scsicmd[0] == VERIFY_16) - scsi_16_lba_len(scsicmd, &block, &n_block); - else + if (cdb[0] == VERIFY) { + if (scmd->cmd_len < 10) + goto invalid_fld; + scsi_10_lba_len(cdb, &block, &n_block); + } else if (cdb[0] == VERIFY_16) { + if (scmd->cmd_len < 16) + goto invalid_fld; + scsi_16_lba_len(cdb, &block, &n_block); + } else goto invalid_fld; if (!n_block) @@ -1205,24 +1627,23 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc return 0; invalid_fld: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ return 1; out_of_range: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); /* "Logical Block Address out of range" */ return 1; nothing_to_do: - qc->scsicmd->result = SAM_STAT_GOOD; + scmd->result = SAM_STAT_GOOD; return 1; } /** * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one * @qc: Storage for translated ATA taskfile - * @scsicmd: SCSI command to translate * * Converts any of six SCSI read/write commands into the * ATA counterpart, including starting sector (LBA), @@ -1238,32 +1659,33 @@ nothing_to_do: * RETURNS: * Zero on success, non-zero on error. */ - -static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) +static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) { - struct ata_taskfile *tf = &qc->tf; - struct ata_device *dev = qc->dev; + struct scsi_cmnd *scmd = qc->scsicmd; + const u8 *cdb = scmd->cmnd; + unsigned int tf_flags = 0; u64 block; u32 n_block; + int rc; - qc->flags |= ATA_QCFLAG_IO; - tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - - if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || - scsicmd[0] == WRITE_16) - tf->flags |= ATA_TFLAG_WRITE; + if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16) + tf_flags |= ATA_TFLAG_WRITE; /* Calculate the SCSI LBA, transfer length and FUA. */ - switch (scsicmd[0]) { + switch (cdb[0]) { case READ_10: case WRITE_10: - scsi_10_lba_len(scsicmd, &block, &n_block); - if (unlikely(scsicmd[1] & (1 << 3))) - tf->flags |= ATA_TFLAG_FUA; + if (unlikely(scmd->cmd_len < 10)) + goto invalid_fld; + scsi_10_lba_len(cdb, &block, &n_block); + if (cdb[1] & (1 << 3)) + tf_flags |= ATA_TFLAG_FUA; break; case READ_6: case WRITE_6: - scsi_6_lba_len(scsicmd, &block, &n_block); + if (unlikely(scmd->cmd_len < 6)) + goto invalid_fld; + scsi_6_lba_len(cdb, &block, &n_block); /* for 6-byte r/w commands, transfer length 0 * means 256 blocks of data, not 0 block. 
@@ -1273,9 +1695,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm break; case READ_16: case WRITE_16: - scsi_16_lba_len(scsicmd, &block, &n_block); - if (unlikely(scsicmd[1] & (1 << 3))) - tf->flags |= ATA_TFLAG_FUA; + if (unlikely(scmd->cmd_len < 16)) + goto invalid_fld; + scsi_16_lba_len(cdb, &block, &n_block); + if (cdb[1] & (1 << 3)) + tf_flags |= ATA_TFLAG_FUA; break; default: DPRINTK("no-byte command\n"); @@ -1293,147 +1717,51 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm */ goto nothing_to_do; - if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) { - /* yay, NCQ */ - if (!lba_48_ok(block, n_block)) - goto out_of_range; - - tf->protocol = ATA_PROT_NCQ; - tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; - - if (tf->flags & ATA_TFLAG_WRITE) - tf->command = ATA_CMD_FPDMA_WRITE; - else - tf->command = ATA_CMD_FPDMA_READ; - - qc->nsect = n_block; - - tf->nsect = qc->tag << 3; - tf->hob_feature = (n_block >> 8) & 0xff; - tf->feature = n_block & 0xff; - - tf->hob_lbah = (block >> 40) & 0xff; - tf->hob_lbam = (block >> 32) & 0xff; - tf->hob_lbal = (block >> 24) & 0xff; - tf->lbah = (block >> 16) & 0xff; - tf->lbam = (block >> 8) & 0xff; - tf->lbal = block & 0xff; - - tf->device = 1 << 6; - if (tf->flags & ATA_TFLAG_FUA) - tf->device |= 1 << 7; - } else if (dev->flags & ATA_DFLAG_LBA) { - tf->flags |= ATA_TFLAG_LBA; - - if (lba_28_ok(block, n_block)) { - /* use LBA28 */ - tf->device |= (block >> 24) & 0xf; - } else if (lba_48_ok(block, n_block)) { - if (!(dev->flags & ATA_DFLAG_LBA48)) - goto out_of_range; - - /* use LBA48 */ - tf->flags |= ATA_TFLAG_LBA48; - - tf->hob_nsect = (n_block >> 8) & 0xff; - - tf->hob_lbah = (block >> 40) & 0xff; - tf->hob_lbam = (block >> 32) & 0xff; - tf->hob_lbal = (block >> 24) & 0xff; - } else - /* request too large even for LBA48 */ - goto out_of_range; - - if (unlikely(ata_rwcmd_protocol(qc) < 0)) - goto invalid_fld; - - qc->nsect = n_block; - tf->nsect = n_block & 0xff; - - tf->lbah = (block >> 16) & 0xff; - tf->lbam = (block >> 8) & 0xff; - tf->lbal = block & 0xff; - - tf->device |= ATA_LBA; - } else { - /* CHS */ - u32 sect, head, cyl, track; - - /* The request -may- be too large for CHS addressing. */ - if (!lba_28_ok(block, n_block)) - goto out_of_range; - - if (unlikely(ata_rwcmd_protocol(qc) < 0)) - goto invalid_fld; - - /* Convert LBA to CHS */ - track = (u32)block / dev->sectors; - cyl = track / dev->heads; - head = track % dev->heads; - sect = (u32)block % dev->sectors + 1; - - DPRINTK("block %u track %u cyl %u head %u sect %u\n", - (u32)block, track, cyl, head, sect); - - /* Check whether the converted CHS can fit. 
- Cylinder: 0-65535 - Head: 0-15 - Sector: 1-255*/ - if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) - goto out_of_range; - - qc->nsect = n_block; - tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ - tf->lbal = sect; - tf->lbam = cyl; - tf->lbah = cyl >> 8; - tf->device |= head; - } + qc->flags |= ATA_QCFLAG_IO; + qc->nbytes = n_block * scmd->device->sector_size; - return 0; + rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, + qc->tag); + if (likely(rc == 0)) + return 0; + if (rc == -ERANGE) + goto out_of_range; + /* treat all other errors as -EINVAL, fall through */ invalid_fld: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ return 1; out_of_range: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); /* "Logical Block Address out of range" */ return 1; nothing_to_do: - qc->scsicmd->result = SAM_STAT_GOOD; + scmd->result = SAM_STAT_GOOD; return 1; } static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) { + struct ata_port *ap = qc->ap; struct scsi_cmnd *cmd = qc->scsicmd; u8 *cdb = cmd->cmnd; - int need_sense = (qc->err_mask != 0); - - /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and - * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE - * cache - */ - if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) && - ((qc->tf.feature == SETFEATURES_WC_ON) || - (qc->tf.feature == SETFEATURES_WC_OFF))) { - qc->ap->eh_info.action |= ATA_EH_REVALIDATE; - ata_port_schedule_eh(qc->ap); - } + int need_sense = (qc->err_mask != 0); /* For ATA pass thru (SAT) commands, generate a sense block if * user mandated it or if there's an error. Note that if we - * generate because the user forced us to, a check condition - * is generated and the ATA register values are returned + * generate because the user forced us to [CK_COND =1], a check + * condition is generated and the ATA register values are returned * whether the command completed successfully or not. If there - * was no error, SK, ASC and ASCQ will all be zero. + * was no error, we use the following sense data: + * sk = RECOVERED ERROR + * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE */ if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && - ((cdb[2] & 0x20) || need_sense)) { - ata_gen_ata_desc_sense(qc); + ((cdb[2] & 0x20) || need_sense)) { + ata_gen_passthru_sense(qc); } else { if (!need_sense) { cmd->result = SAM_STAT_GOOD; @@ -1444,12 +1772,12 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) * good for smaller LBA (and maybe CHS?) * devices. */ - ata_gen_fixed_sense(qc); + ata_gen_ata_sense(qc); } } - if (need_sense && !qc->ap->ops->error_handler) - ata_dump_status(qc->ap->id, &qc->result_tf); + if (need_sense && !ap->ops->error_handler) + ata_dump_status(ap->print_id, &qc->result_tf); qc->scsidone(cmd); @@ -1457,43 +1785,9 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) } /** - * ata_scmd_need_defer - Check whether we need to defer scmd - * @dev: ATA device to which the command is addressed - * @is_io: Is the command IO (and thus possibly NCQ)? - * - * NCQ and non-NCQ commands cannot run together. As upper layer - * only knows the queue depth, we are responsible for maintaining - * exclusion. This function checks whether a new command can be - * issued to @dev. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * 1 if deferring is needed, 0 otherwise. 
- */ -static int ata_scmd_need_defer(struct ata_device *dev, int is_io) -{ - struct ata_port *ap = dev->ap; - - if (!(dev->flags & ATA_DFLAG_NCQ)) - return 0; - - if (is_io) { - if (!ata_tag_valid(ap->active_tag)) - return 0; - } else { - if (!ata_tag_valid(ap->active_tag) && !ap->sactive) - return 0; - } - return 1; -} - -/** * ata_scsi_translate - Translate then issue SCSI command to ATA device * @dev: ATA device to which the command is addressed * @cmd: SCSI command to execute - * @done: SCSI command completion function * @xlat_func: Actor which translates @cmd to an ATA taskfile * * Our ->queuecommand() function has decided that the SCSI @@ -1517,45 +1811,41 @@ static int ata_scmd_need_defer(struct ata_device *dev, int is_io) * needs to be deferred. */ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *), ata_xlat_func_t xlat_func) { + struct ata_port *ap = dev->link->ap; struct ata_queued_cmd *qc; - u8 *scsicmd = cmd->cmnd; - int is_io = xlat_func == ata_scsi_rw_xlat; + int rc; VPRINTK("ENTER\n"); - if (unlikely(ata_scmd_need_defer(dev, is_io))) - goto defer; - - qc = ata_scsi_qc_new(dev, cmd, done); + qc = ata_scsi_qc_new(dev, cmd); if (!qc) goto err_mem; /* data is present; dma-map it */ if (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_TO_DEVICE) { - if (unlikely(cmd->request_bufflen < 1)) { - ata_dev_printk(dev, KERN_WARNING, - "WARNING: zero len r/w req\n"); + if (unlikely(scsi_bufflen(cmd) < 1)) { + ata_dev_warn(dev, "WARNING: zero len r/w req\n"); goto err_did; } - if (cmd->use_sg) - ata_sg_init(qc, cmd->request_buffer, cmd->use_sg); - else - ata_sg_init_one(qc, cmd->request_buffer, - cmd->request_bufflen); + ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd)); qc->dma_dir = cmd->sc_data_direction; } qc->complete_fn = ata_scsi_qc_complete; - if (xlat_func(qc, scsicmd)) + if (xlat_func(qc)) goto early_finish; + if (ap->ops->qc_defer) { + if ((rc = ap->ops->qc_defer(qc))) + goto defer; + } + /* select device, send command to hardware */ ata_qc_issue(qc); @@ -1563,77 +1853,73 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, return 0; early_finish: - ata_qc_free(qc); - done(cmd); + ata_qc_free(qc); + cmd->scsi_done(cmd); DPRINTK("EXIT - early finish (good or error)\n"); return 0; err_did: ata_qc_free(qc); -err_mem: cmd->result = (DID_ERROR << 16); - done(cmd); + cmd->scsi_done(cmd); +err_mem: DPRINTK("EXIT - internal\n"); return 0; defer: + ata_qc_free(qc); DPRINTK("EXIT - defer\n"); - return SCSI_MLQUEUE_DEVICE_BUSY; + if (rc == ATA_DEFER_LINK) + return SCSI_MLQUEUE_DEVICE_BUSY; + else + return SCSI_MLQUEUE_HOST_BUSY; } /** * ata_scsi_rbuf_get - Map response buffer. * @cmd: SCSI command containing buffer to be mapped. - * @buf_out: Pointer to mapped area. + * @flags: unsigned long variable to store irq enable status + * @copy_in: copy in from user buffer * - * Maps buffer contained within SCSI command @cmd. + * Prepare buffer for simulated SCSI commands. * * LOCKING: - * spin_lock_irqsave(host lock) + * spin_lock_irqsave(ata_scsi_rbuf_lock) on success * * RETURNS: - * Length of response buffer. + * Pointer to response buffer. 
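+ * (This is always the single, statically allocated ata_scsi_rbuf
+ * scratch buffer; the pointer stays valid only until the matching
+ * ata_scsi_rbuf_put() call drops ata_scsi_rbuf_lock.)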
*/ - -static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) +static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in, + unsigned long *flags) { - u8 *buf; - unsigned int buflen; - - if (cmd->use_sg) { - struct scatterlist *sg; + spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags); - sg = (struct scatterlist *) cmd->request_buffer; - buf = kmap_atomic(sg->page, KM_USER0) + sg->offset; - buflen = sg->length; - } else { - buf = cmd->request_buffer; - buflen = cmd->request_bufflen; - } - - *buf_out = buf; - return buflen; + memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE); + if (copy_in) + sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), + ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE); + return ata_scsi_rbuf; } /** * ata_scsi_rbuf_put - Unmap response buffer. * @cmd: SCSI command containing buffer to be unmapped. - * @buf: buffer to unmap + * @copy_out: copy out result + * @flags: @flags passed to ata_scsi_rbuf_get() * - * Unmaps response buffer contained within @cmd. + * Returns rbuf buffer. The result is copied to @cmd's buffer if + * @copy_back is true. * * LOCKING: - * spin_lock_irqsave(host lock) + * Unlocks ata_scsi_rbuf_lock. */ - -static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) +static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out, + unsigned long *flags) { - if (cmd->use_sg) { - struct scatterlist *sg; - - sg = (struct scatterlist *) cmd->request_buffer; - kunmap_atomic(buf - sg->offset, KM_USER0); - } + if (copy_out) + sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), + ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags); } /** @@ -1651,19 +1937,17 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) * LOCKING: * spin_lock_irqsave(host lock) */ - -void ata_scsi_rbuf_fill(struct ata_scsi_args *args, - unsigned int (*actor) (struct ata_scsi_args *args, - u8 *rbuf, unsigned int buflen)) +static void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf)) { u8 *rbuf; - unsigned int buflen, rc; + unsigned int rc; struct scsi_cmnd *cmd = args->cmd; + unsigned long flags; - buflen = ata_scsi_rbuf_get(cmd, &rbuf); - memset(rbuf, 0, buflen); - rc = actor(args, rbuf, buflen); - ata_scsi_rbuf_put(cmd, rbuf); + rbuf = ata_scsi_rbuf_get(cmd, false, &flags); + rc = actor(args, rbuf); + ata_scsi_rbuf_put(cmd, rc == 0, &flags); if (rc == 0) cmd->result = SAM_STAT_GOOD; @@ -1674,7 +1958,6 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, * ata_scsiop_inq_std - Simulate INQUIRY command * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns standard device identification data associated * with non-VPD INQUIRY command output. 
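
The two helpers above trade the old per-command kmap/scatterlist bookkeeping for one statically allocated scratch buffer serialized by ata_scsi_rbuf_lock, with ata_scsi_rbuf_fill() copying the result out only when the actor succeeds. A minimal user-space model of that pattern, for illustration only (a mutex stands in for the spinlock, and every name below is hypothetical, not part of the patch):

	#include <pthread.h>
	#include <string.h>

	#define RBUF_SIZE 4096

	static unsigned char rbuf[RBUF_SIZE];
	static pthread_mutex_t rbuf_lock = PTHREAD_MUTEX_INITIALIZER;

	typedef unsigned int (*actor_fn)(void *args, unsigned char *rbuf);

	/* Model of ata_scsi_rbuf_fill(): take the lock, hand the actor a
	 * zeroed fixed-size buffer, copy the response out only on success. */
	static unsigned int rbuf_fill(void *args, unsigned char *out,
				      size_t out_len, actor_fn actor)
	{
		unsigned int rc;

		pthread_mutex_lock(&rbuf_lock);
		memset(rbuf, 0, RBUF_SIZE);
		rc = actor(args, rbuf);
		if (rc == 0)
			memcpy(out, rbuf,
			       out_len < RBUF_SIZE ? out_len : RBUF_SIZE);
		pthread_mutex_unlock(&rbuf_lock);
		return rc;
	}

This is why the simulator actors below lose their buflen argument: each one now always sees the same fixed-size, pre-zeroed buffer.
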
@@ -1682,10 +1965,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; u8 hdr[] = { TYPE_DISK, 0, @@ -1694,35 +1984,25 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 95 - 4 }; + VPRINTK("ENTER\n"); + /* set scsi removeable (RMB) bit per ata bit */ if (ata_id_removeable(args->id)) hdr[1] |= (1 << 7); - VPRINTK("ENTER\n"); - memcpy(rbuf, hdr, sizeof(hdr)); + memcpy(&rbuf[8], "ATA ", 8); + ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); - if (buflen > 35) { - memcpy(&rbuf[8], "ATA ", 8); - ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); - ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); - if (rbuf[32] == 0 || rbuf[32] == ' ') - memcpy(&rbuf[32], "n/a ", 4); - } - - if (buflen > 63) { - const u8 versions[] = { - 0x60, /* SAM-3 (no version claimed) */ + /* From SAT, use last 2 words from fw rev unless they are spaces */ + ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); + if (strncmp(&rbuf[32], " ", 4) == 0) + ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); - 0x03, - 0x20, /* SBC-2 (no version claimed) */ + if (rbuf[32] == 0 || rbuf[32] == ' ') + memcpy(&rbuf[32], "n/a ", 4); - 0x02, - 0x60 /* SPC-3 (no version claimed) */ - }; - - memcpy(rbuf + 59, versions, sizeof(versions)); - } + memcpy(rbuf + 59, versions, sizeof(versions)); return 0; } @@ -1731,27 +2011,26 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns list of inquiry VPD pages available. * * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) { const u8 pages[] = { 0x00, /* page 0x00, this page */ 0x80, /* page 0x80, unit serial no page */ - 0x83 /* page 0x83, device ident page */ + 0x83, /* page 0x83, device ident page */ + 0x89, /* page 0x89, ata info page */ + 0xb0, /* page 0xb0, block limits page */ + 0xb1, /* page 0xb1, block device characteristics page */ + 0xb2, /* page 0xb2, thin provisioning page */ }; - rbuf[3] = sizeof(pages); /* number of supported VPD pages */ - - if (buflen > 6) - memcpy(rbuf + 4, pages, sizeof(pages)); + rbuf[3] = sizeof(pages); /* number of supported VPD pages */ + memcpy(rbuf + 4, pages, sizeof(pages)); return 0; } @@ -1759,29 +2038,24 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns ATA device serial number. 
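 * (The serial number comes from IDENTIFY DEVICE words 10-19,
 * ATA_ID_SERNO, byte-swapped into 20 ASCII characters by
 * ata_id_string().)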
* * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf) { const u8 hdr[] = { 0, 0x80, /* this page code */ 0, - ATA_SERNO_LEN, /* page len */ + ATA_ID_SERNO_LEN, /* page len */ }; - memcpy(rbuf, hdr, sizeof(hdr)); - - if (buflen > (ATA_SERNO_LEN + 4 - 1)) - ata_id_string(args->id, (unsigned char *) &rbuf[4], - ATA_ID_SERNO_OFS, ATA_SERNO_LEN); + memcpy(rbuf, hdr, sizeof(hdr)); + ata_id_string(args->id, (unsigned char *) &rbuf[4], + ATA_ID_SERNO, ATA_ID_SERNO_LEN); return 0; } @@ -1789,7 +2063,6 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Yields two logical unit device identification designators: * - vendor specific ASCII containing the ATA serial number @@ -1799,51 +2072,153 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf) { - int num; const int sat_model_serial_desc_len = 68; - const int ata_model_byte_len = 40; + int num; rbuf[1] = 0x83; /* this page code */ num = 4; - if (buflen > (ATA_SERNO_LEN + num + 3)) { - /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ - rbuf[num + 0] = 2; - rbuf[num + 3] = ATA_SERNO_LEN; - num += 4; - ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_SERNO_OFS, ATA_SERNO_LEN); - num += ATA_SERNO_LEN; - } - if (buflen > (sat_model_serial_desc_len + num + 3)) { - /* SAT defined lu model and serial numbers descriptor */ - /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ - rbuf[num + 0] = 2; - rbuf[num + 1] = 1; - rbuf[num + 3] = sat_model_serial_desc_len; + /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ + rbuf[num + 0] = 2; + rbuf[num + 3] = ATA_ID_SERNO_LEN; + num += 4; + ata_id_string(args->id, (unsigned char *) rbuf + num, + ATA_ID_SERNO, ATA_ID_SERNO_LEN); + num += ATA_ID_SERNO_LEN; + + /* SAT defined lu model and serial numbers descriptor */ + /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ + rbuf[num + 0] = 2; + rbuf[num + 1] = 1; + rbuf[num + 3] = sat_model_serial_desc_len; + num += 4; + memcpy(rbuf + num, "ATA ", 8); + num += 8; + ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, + ATA_ID_PROD_LEN); + num += ATA_ID_PROD_LEN; + ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, + ATA_ID_SERNO_LEN); + num += ATA_ID_SERNO_LEN; + + if (ata_id_has_wwn(args->id)) { + /* SAT defined lu world wide name */ + /* piv=0, assoc=lu, code_set=binary, designator=NAA */ + rbuf[num + 0] = 1; + rbuf[num + 1] = 3; + rbuf[num + 3] = ATA_ID_WWN_LEN; num += 4; - memcpy(rbuf + num, "ATA ", 8); - num += 8; - ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_PROD_OFS, ata_model_byte_len); - num += ata_model_byte_len; ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_SERNO_OFS, ATA_SERNO_LEN); - num += ATA_SERNO_LEN; + ATA_ID_WWN, ATA_ID_WWN_LEN); + num += ATA_ID_WWN_LEN; } rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ return 0; } /** + * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA 
info + * @args: device IDENTIFY data / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * + * Yields SAT-specified ATA VPD page. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) +{ + struct ata_taskfile tf; + + memset(&tf, 0, sizeof(tf)); + + rbuf[1] = 0x89; /* our page code */ + rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ + rbuf[3] = (0x238 & 0xff); + + memcpy(&rbuf[8], "linux ", 8); + memcpy(&rbuf[16], "libata ", 16); + memcpy(&rbuf[32], DRV_VERSION, 4); + + /* we don't store the ATA device signature, so we fake it */ + + tf.command = ATA_DRDY; /* really, this is Status reg */ + tf.lbal = 0x1; + tf.nsect = 0x1; + + ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */ + rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ + + rbuf[56] = ATA_CMD_ID_ATA; + + memcpy(&rbuf[60], &args->id[0], 512); + return 0; +} + +static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) +{ + u16 min_io_sectors; + + rbuf[1] = 0xb0; + rbuf[3] = 0x3c; /* required VPD size with unmap support */ + + /* + * Optimal transfer length granularity. + * + * This is always one physical block, but for disks with a smaller + * logical than physical sector size we need to figure out what the + * latter is. + */ + min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); + put_unaligned_be16(min_io_sectors, &rbuf[6]); + + /* + * Optimal unmap granularity. + * + * The ATA spec doesn't even know about a granularity or alignment + * for the TRIM command. We can omit most of the unmap-related + * VPD page entries, but we have to specify a granularity to signal + * that we support some form of unmap - in this case via WRITE SAME + * with the unmap bit set. + */ + if (ata_id_has_trim(args->id)) { + put_unaligned_be64(65535 * 512 / 8, &rbuf[36]); + put_unaligned_be32(1, &rbuf[28]); + } + + return 0; +} + +static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) +{ + int form_factor = ata_id_form_factor(args->id); + int media_rotation_rate = ata_id_rotation_rate(args->id); + + rbuf[1] = 0xb1; + rbuf[3] = 0x3c; + rbuf[4] = media_rotation_rate >> 8; + rbuf[5] = media_rotation_rate; + rbuf[7] = form_factor; + + return 0; +} + +static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf) +{ + /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ + rbuf[1] = 0xb2; + rbuf[3] = 0x4; + rbuf[5] = 1 << 6; /* TPWS */ + + return 0; +} + +/** + * ata_scsiop_noop - Command handler that simply returns success. + * @args: device IDENTIFY data / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * No operation. Simply returns success to caller, to indicate * that the caller should successfully complete this SCSI command. 
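
The Block Limits arithmetic in ata_scsiop_inq_b0() above is terse, so here is an isolated sketch of the same computation (the inputs replace the ata_id_* accessors, the function name is hypothetical, and the buffer is assumed pre-zeroed, as ata_scsi_rbuf_fill() guarantees). The 65535 * 512 / 8 figure is the largest LBA count one 512-byte TRIM payload can describe, 64 range entries of up to 65535 sectors each, exposed via the maximum WRITE SAME length field:

	/* Sketch only: log2_per_phys would come from
	 * ata_id_log2_per_physical_sector(); rbuf must be zeroed. */
	static void fill_block_limits(unsigned int log2_per_phys,
				      int has_trim, unsigned char *rbuf)
	{
		/* one physical block, in logical sectors: a drive with
		 * 512-byte logical in 4K physical sectors reports
		 * log2_per_phys = 3, i.e. a granularity of 8 sectors */
		unsigned int min_io_sectors = 1U << log2_per_phys;
		int i;

		rbuf[6] = (min_io_sectors >> 8) & 0xff;	/* big-endian u16 */
		rbuf[7] = min_io_sectors & 0xff;

		if (has_trim) {
			/* 64 entries * 65535 sectors = 65535 * 512 / 8 */
			unsigned long long max_ws_len = 65535ULL * 512 / 8;

			for (i = 0; i < 8; i++)	/* big-endian u64 at 36 */
				rbuf[36 + i] =
					(max_ws_len >> (8 * (7 - i))) & 0xff;
			rbuf[31] = 1;	/* unmap granularity (be32 at 28) */
		}
	}
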
@@ -1851,47 +2226,40 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf) { VPRINTK("ENTER\n"); return 0; } /** - * ata_msense_push - Push data onto MODE SENSE data output buffer - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer - * @buf: Pointer to BLOB being added to output buffer - * @buflen: Length of BLOB + * modecpy - Prepare response for MODE SENSE + * @dest: output buffer + * @src: data being copied + * @n: length of mode page + * @changeable: whether changeable parameters are requested * - * Store MODE SENSE data on an output buffer. + * Generate a generic MODE SENSE page for either current or changeable + * parameters. * * LOCKING: * None. */ - -static void ata_msense_push(u8 **ptr_io, const u8 *last, - const u8 *buf, unsigned int buflen) +static void modecpy(u8 *dest, const u8 *src, int n, bool changeable) { - u8 *ptr = *ptr_io; - - if ((ptr + buflen - 1) > last) - return; - - memcpy(ptr, buf, buflen); - - ptr += buflen; - - *ptr_io = ptr; + if (changeable) { + memcpy(dest, src, 2); + memset(dest + 2, 0, n - 2); + } else { + memcpy(dest, src, n); + } } /** * ata_msense_caching - Simulate MODE SENSE caching info page * @id: device IDENTIFY data - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a caching info page, which conditionally indicates * write caching to the SCSI layer, depending on device @@ -1900,58 +2268,46 @@ static void ata_msense_push(u8 **ptr_io, const u8 *last, * LOCKING: * None. */ - -static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io, - const u8 *last) +static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) { - u8 page[CACHE_MPAGE_LEN]; - - memcpy(page, def_cache_mpage, sizeof(page)); - if (ata_id_wcache_enabled(id)) - page[2] |= (1 << 2); /* write cache enable */ - if (!ata_id_rahead_enabled(id)) - page[12] |= (1 << 5); /* disable read ahead */ - - ata_msense_push(ptr_io, last, page, sizeof(page)); - return sizeof(page); + modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable); + if (changeable || ata_id_wcache_enabled(id)) + buf[2] |= (1 << 2); /* write cache enable */ + if (!changeable && !ata_id_rahead_enabled(id)) + buf[12] |= (1 << 5); /* disable read ahead */ + return sizeof(def_cache_mpage); } /** * ata_msense_ctl_mode - Simulate MODE SENSE control mode page - * @dev: Device associated with this MODE SENSE command - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a generic MODE SENSE control mode page. * * LOCKING: * None. 
*/ - -static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) +static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable) { - ata_msense_push(ptr_io, last, def_control_mpage, - sizeof(def_control_mpage)); + modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable); return sizeof(def_control_mpage); } /** * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page - * @dev: Device associated with this MODE SENSE command - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a generic MODE SENSE r/w error recovery page. * * LOCKING: * None. */ - -static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) +static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable) { - - ata_msense_push(ptr_io, last, def_rw_recovery_mpage, - sizeof(def_rw_recovery_mpage)); + modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage), + changeable); return sizeof(def_rw_recovery_mpage); } @@ -1961,15 +2317,15 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) */ static int ata_dev_supports_fua(u16 *id) { - unsigned char model[41], fw[9]; + unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1]; if (!libata_fua) return 0; if (!ata_id_has_fua(id)) return 0; - ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model)); - ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw)); + ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); + ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw)); if (strcmp(model, "Maxtor")) return 1; @@ -1983,7 +2339,6 @@ static int ata_dev_supports_fua(u16 *id) * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate MODE SENSE commands. Assume this is invoked for direct * access devices (e.g. disks) only. There should be no block @@ -1992,19 +2347,17 @@ static int ata_dev_supports_fua(u16 *id) * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) { struct ata_device *dev = args->dev; - u8 *scsicmd = args->cmd->cmnd, *p, *last; + u8 *scsicmd = args->cmd->cmnd, *p = rbuf; const u8 sat_blk_desc[] = { 0, 0, 0, 0, /* number of blocks: sat unspecified */ 0, 0, 0x2, 0x0 /* block length: 512 bytes */ }; u8 pg, spg; - unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; + unsigned int ebd, page_control, six_byte; u8 dpofua; VPRINTK("ENTER\n"); @@ -2018,26 +2371,19 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, page_control = scsicmd[2] >> 6; switch (page_control) { case 0: /* current */ + case 1: /* changeable */ + case 2: /* defaults */ break; /* supported */ case 3: /* saved */ goto saving_not_supp; - case 1: /* changeable */ - case 2: /* defaults */ default: goto invalid_fld; } - if (six_byte) { - output_len = 4 + (ebd ? 8 : 0); - alloc_len = scsicmd[4]; - } else { - output_len = 8 + (ebd ? 8 : 0); - alloc_len = (scsicmd[7] << 8) + scsicmd[8]; - } - minlen = (alloc_len < buflen) ? alloc_len : buflen; - - p = rbuf + output_len; - last = rbuf + minlen - 1; + if (six_byte) + p += 4 + (ebd ? 8 : 0); + else + p += 8 + (ebd ? 
8 : 0); pg = scsicmd[2] & 0x3f; spg = scsicmd[3]; @@ -2050,61 +2396,48 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, switch(pg) { case RW_RECOVERY_MPAGE: - output_len += ata_msense_rw_recovery(&p, last); + p += ata_msense_rw_recovery(p, page_control == 1); break; case CACHE_MPAGE: - output_len += ata_msense_caching(args->id, &p, last); + p += ata_msense_caching(args->id, p, page_control == 1); break; - case CONTROL_MPAGE: { - output_len += ata_msense_ctl_mode(&p, last); + case CONTROL_MPAGE: + p += ata_msense_ctl_mode(p, page_control == 1); break; - } case ALL_MPAGES: - output_len += ata_msense_rw_recovery(&p, last); - output_len += ata_msense_caching(args->id, &p, last); - output_len += ata_msense_ctl_mode(&p, last); + p += ata_msense_rw_recovery(p, page_control == 1); + p += ata_msense_caching(args->id, p, page_control == 1); + p += ata_msense_ctl_mode(p, page_control == 1); break; default: /* invalid page code */ goto invalid_fld; } - if (minlen < 1) - return 0; - dpofua = 0; if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) dpofua = 1 << 4; if (six_byte) { - output_len--; - rbuf[0] = output_len; - if (minlen > 2) - rbuf[2] |= dpofua; + rbuf[0] = p - rbuf - 1; + rbuf[2] |= dpofua; if (ebd) { - if (minlen > 3) - rbuf[3] = sizeof(sat_blk_desc); - if (minlen > 11) - memcpy(rbuf + 4, sat_blk_desc, - sizeof(sat_blk_desc)); + rbuf[3] = sizeof(sat_blk_desc); + memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc)); } } else { - output_len -= 2; + unsigned int output_len = p - rbuf - 2; + rbuf[0] = output_len >> 8; - if (minlen > 1) - rbuf[1] = output_len; - if (minlen > 3) - rbuf[3] |= dpofua; + rbuf[1] = output_len; + rbuf[3] |= dpofua; if (ebd) { - if (minlen > 7) - rbuf[7] = sizeof(sat_blk_desc); - if (minlen > 15) - memcpy(rbuf + 8, sat_blk_desc, - sizeof(sat_blk_desc)); + rbuf[7] = sizeof(sat_blk_desc); + memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc)); } } return 0; @@ -2124,72 +2457,69 @@ saving_not_supp: * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate READ CAPACITY commands. * * LOCKING: - * spin_lock_irqsave(host lock) + * None. 
*/ - -unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) { - u64 n_sectors; - u32 tmp; - - VPRINTK("ENTER\n"); - - if (ata_id_has_lba(args->id)) { - if (ata_id_has_lba48(args->id)) - n_sectors = ata_id_u64(args->id, 100); - else - n_sectors = ata_id_u32(args->id, 60); - } else { - /* CHS default translation */ - n_sectors = args->id[1] * args->id[3] * args->id[6]; + struct ata_device *dev = args->dev; + u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ + u32 sector_size; /* physical sector size in bytes */ + u8 log2_per_phys; + u16 lowest_aligned; - if (ata_id_current_chs_valid(args->id)) - /* CHS current translation */ - n_sectors = ata_id_u32(args->id, 57); - } + sector_size = ata_id_logical_sector_size(dev->id); + log2_per_phys = ata_id_log2_per_physical_sector(dev->id); + lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); - n_sectors--; /* ATA TotalUserSectors - 1 */ + VPRINTK("ENTER\n"); if (args->cmd->cmnd[0] == READ_CAPACITY) { - if( n_sectors >= 0xffffffffULL ) - tmp = 0xffffffff ; /* Return max count on overflow */ - else - tmp = n_sectors ; + if (last_lba >= 0xffffffffULL) + last_lba = 0xffffffff; /* sector count, 32-bit */ - rbuf[0] = tmp >> (8 * 3); - rbuf[1] = tmp >> (8 * 2); - rbuf[2] = tmp >> (8 * 1); - rbuf[3] = tmp; + rbuf[0] = last_lba >> (8 * 3); + rbuf[1] = last_lba >> (8 * 2); + rbuf[2] = last_lba >> (8 * 1); + rbuf[3] = last_lba; /* sector size */ - tmp = ATA_SECT_SIZE; - rbuf[6] = tmp >> 8; - rbuf[7] = tmp; - + rbuf[4] = sector_size >> (8 * 3); + rbuf[5] = sector_size >> (8 * 2); + rbuf[6] = sector_size >> (8 * 1); + rbuf[7] = sector_size; } else { /* sector count, 64-bit */ - tmp = n_sectors >> (8 * 4); - rbuf[2] = tmp >> (8 * 3); - rbuf[3] = tmp >> (8 * 2); - rbuf[4] = tmp >> (8 * 1); - rbuf[5] = tmp; - tmp = n_sectors; - rbuf[6] = tmp >> (8 * 3); - rbuf[7] = tmp >> (8 * 2); - rbuf[8] = tmp >> (8 * 1); - rbuf[9] = tmp; + rbuf[0] = last_lba >> (8 * 7); + rbuf[1] = last_lba >> (8 * 6); + rbuf[2] = last_lba >> (8 * 5); + rbuf[3] = last_lba >> (8 * 4); + rbuf[4] = last_lba >> (8 * 3); + rbuf[5] = last_lba >> (8 * 2); + rbuf[6] = last_lba >> (8 * 1); + rbuf[7] = last_lba; /* sector size */ - tmp = ATA_SECT_SIZE; - rbuf[12] = tmp >> 8; - rbuf[13] = tmp; + rbuf[ 8] = sector_size >> (8 * 3); + rbuf[ 9] = sector_size >> (8 * 2); + rbuf[10] = sector_size >> (8 * 1); + rbuf[11] = sector_size; + + rbuf[12] = 0; + rbuf[13] = log2_per_phys; + rbuf[14] = (lowest_aligned >> 8) & 0x3f; + rbuf[15] = lowest_aligned; + + if (ata_id_has_trim(args->id)) { + rbuf[14] |= 0x80; /* TPE */ + + if (ata_id_has_zero_after_trim(args->id)) + rbuf[14] |= 0x40; /* TPRZ */ + } } return 0; @@ -2199,16 +2529,13 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_report_luns - Simulate REPORT LUNS command * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate REPORT LUNS command. 
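 * (Only LUN 0 is reported: the LUN list length is set to a
 * single 8-byte entry and the entry itself stays zeroed.)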
* * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf) { VPRINTK("ENTER\n"); rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ @@ -2216,57 +2543,6 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, return 0; } -/** - * ata_scsi_set_sense - Set SCSI sense data and status - * @cmd: SCSI request to be handled - * @sk: SCSI-defined sense key - * @asc: SCSI-defined additional sense code - * @ascq: SCSI-defined additional sense code qualifier - * - * Helper function that builds a valid fixed format, current - * response code and the given sense key (sk), additional sense - * code (asc) and additional sense code qualifier (ascq) with - * a SCSI command status of %SAM_STAT_CHECK_CONDITION and - * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . - * - * LOCKING: - * Not required - */ - -void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) -{ - cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; - - cmd->sense_buffer[0] = 0x70; /* fixed format, current */ - cmd->sense_buffer[2] = sk; - cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ - cmd->sense_buffer[12] = asc; - cmd->sense_buffer[13] = ascq; -} - -/** - * ata_scsi_badcmd - End a SCSI request with an error - * @cmd: SCSI request to be handled - * @done: SCSI command completion function - * @asc: SCSI-defined additional sense code - * @ascq: SCSI-defined additional sense code qualifier - * - * Helper function that completes a SCSI command with - * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST - * and the specified additional sense codes. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ - -void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) -{ - DPRINTK("ENTER\n"); - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); - - done(cmd); -} - static void atapi_sense_complete(struct ata_queued_cmd *qc) { if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { @@ -2275,7 +2551,7 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc) * a sense descriptors, since that's only * correct for ATA, not ATAPI */ - ata_gen_ata_desc_sense(qc); + ata_gen_passthru_sense(qc); } qc->scsidone(qc->scsicmd); @@ -2296,9 +2572,12 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) DPRINTK("ATAPI request sense\n"); /* FIXME: is this needed? 
*/ - memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - ap->ops->tf_read(ap, &qc->tf); +#ifdef CONFIG_ATA_SFF + if (ap->ops->sff_tf_read) + ap->ops->sff_tf_read(ap, &qc->tf); +#endif /* fill these in, for the case where they are -not- overwritten */ cmd->sense_buffer[0] = 0x70; @@ -2306,7 +2585,9 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) ata_qc_reinit(qc); - ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); + /* setup sg table and init transfer direction */ + sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + ata_sg_init(qc, &qc->sgent, 1); qc->dma_dir = DMA_FROM_DEVICE; memset(&qc->cdb, 0, qc->dev->cdb_len); @@ -2317,12 +2598,12 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) qc->tf.command = ATA_CMD_PACKET; if (ata_pio_use_silly(ap)) { - qc->tf.protocol = ATA_PROT_ATAPI_DMA; + qc->tf.protocol = ATAPI_PROT_DMA; qc->tf.feature |= ATAPI_PKT_DMA; } else { - qc->tf.protocol = ATA_PROT_ATAPI; - qc->tf.lbam = (8 * 1024) & 0xff; - qc->tf.lbah = (8 * 1024) >> 8; + qc->tf.protocol = ATAPI_PROT_PIO; + qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; + qc->tf.lbah = 0; } qc->nbytes = SCSI_SENSE_BUFFERSIZE; @@ -2350,7 +2631,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * sense descriptors, since that's only * correct for ATA, not ATAPI */ - ata_gen_ata_desc_sense(qc); + ata_gen_passthru_sense(qc); } /* SCSI EH automatically locks door if sdev->locked is @@ -2362,8 +2643,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * * If door lock fails, always clear sdev->locked to * avoid this infinite loop. + * + * This may happen before SCSI scan is complete. Make + * sure qc->dev->sdev isn't NULL before dereferencing. */ - if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) qc->dev->sdev->locked = 0; qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; @@ -2383,15 +2667,15 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * a sense descriptors, since that's only * correct for ATA, not ATAPI */ - ata_gen_ata_desc_sense(qc); + ata_gen_passthru_sense(qc); } else { u8 *scsicmd = cmd->cmnd; if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { - u8 *buf = NULL; - unsigned int buflen; + unsigned long flags; + u8 *buf; - buflen = ata_scsi_rbuf_get(cmd, &buf); + buf = ata_scsi_rbuf_get(cmd, true, &flags); /* ATAPI devices typically report zero for their SCSI version, * and sometimes deviate from the spec WRT response data @@ -2406,7 +2690,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) buf[3] = 0x32; } - ata_scsi_rbuf_put(cmd, buf); + ata_scsi_rbuf_put(cmd, true, &flags); } cmd->result = SAM_STAT_GOOD; @@ -2418,7 +2702,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) /** * atapi_xlat - Initialize PACKET taskfile * @qc: command structure to be initialized - * @scsicmd: SCSI CDB associated with this PACKET command * * LOCKING: * spin_lock_irqsave(host lock) @@ -2426,101 +2709,120 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * RETURNS: * Zero on success, non-zero on failure. 
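 *
 * (The SCSI CDB is passed to the device verbatim inside the
 * PACKET command; the code below mainly chooses the ATAPI
 * protocol variant and the byte count advertised in the LBA
 * mid/high registers.)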
*/ -static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) +static unsigned int atapi_xlat(struct ata_queued_cmd *qc) { - struct scsi_cmnd *cmd = qc->scsicmd; + struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; - int using_pio = (dev->flags & ATA_DFLAG_PIO); - int nodata = (cmd->sc_data_direction == DMA_NONE); - - if (!using_pio) - /* Check whether ATAPI DMA is safe */ - if (ata_check_atapi_dma(qc)) - using_pio = 1; + int nodata = (scmd->sc_data_direction == DMA_NONE); + int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); + unsigned int nbytes; - memcpy(&qc->cdb, scsicmd, dev->cdb_len); + memset(qc->cdb, 0, dev->cdb_len); + memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); qc->complete_fn = atapi_qc_complete; qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - if (cmd->sc_data_direction == DMA_TO_DEVICE) { + if (scmd->sc_data_direction == DMA_TO_DEVICE) { qc->tf.flags |= ATA_TFLAG_WRITE; DPRINTK("direction: write\n"); } qc->tf.command = ATA_CMD_PACKET; + ata_qc_set_pc_nbytes(qc); - /* no data, or PIO data xfer */ - if (using_pio || nodata) { - if (nodata) - qc->tf.protocol = ATA_PROT_ATAPI_NODATA; - else - qc->tf.protocol = ATA_PROT_ATAPI; - qc->tf.lbam = (8 * 1024) & 0xff; - qc->tf.lbah = (8 * 1024) >> 8; - } + /* check whether ATAPI DMA is safe */ + if (!nodata && !using_pio && atapi_check_dma(qc)) + using_pio = 1; + + /* Some controller variants snoop this value for Packet + * transfers to do state machine and FIFO management. Thus we + * want to set it properly, even for DMA where it is + * otherwise effectively meaningless. + */ + nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024); + + /* Most ATAPI devices which honor the transfer chunk size don't + * behave according to the spec when an odd chunk size which + * matches the transfer length is specified. If the number of + * bytes to transfer is 2n+1, the spec says the device should + * indicate that 2n+1 bytes are going to be transferred and + * transfer 2n+2 bytes where the last byte is padding. + * + * In practice, this doesn't happen. ATAPI devices first + * indicate and transfer 2n bytes and then indicate and + * transfer 2 bytes where the last byte is padding. + * + * This inconsistency confuses several controllers which + * perform PIO using DMA such as Intel AHCIs and sil3124/32. + * These controllers use the actual number of transferred bytes + * to update the DMA pointer, and a transfer of 4n+2 bytes makes + * those controllers push the DMA pointer by 4n+4 bytes because + * SATA data FISes are aligned to 4 bytes. This causes data + * corruption and buffer overrun. + + * + * Always setting nbytes to an even number solves this problem + * because then ATAPI devices don't have to split data at 2n + * boundaries. 
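+ * For example, a 4097-byte request is advertised below as 4098
+ * bytes, so the device never has to indicate an odd-sized chunk.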
+ */ + if (nbytes & 0x1) + nbytes++; - /* DMA data xfer */ + qc->tf.lbam = (nbytes & 0xFF); + qc->tf.lbah = (nbytes >> 8); + + if (nodata) + qc->tf.protocol = ATAPI_PROT_NODATA; + else if (using_pio) + qc->tf.protocol = ATAPI_PROT_PIO; else { - qc->tf.protocol = ATA_PROT_ATAPI_DMA; + /* DMA data xfer */ + qc->tf.protocol = ATAPI_PROT_DMA; qc->tf.feature |= ATAPI_PKT_DMA; - if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE)) + if ((dev->flags & ATA_DFLAG_DMADIR) && + (scmd->sc_data_direction != DMA_TO_DEVICE)) /* some SATA bridges need us to indicate data xfer direction */ qc->tf.feature |= ATAPI_DMADIR; } - qc->nbytes = cmd->request_bufflen; + /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE + as ATAPI tape drives don't get this right otherwise */ return 0; } -static struct ata_device * ata_find_dev(struct ata_port *ap, int id) -{ - if (likely(id < ATA_MAX_DEVICES)) - return &ap->device[id]; - return NULL; -} - -static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, - const struct scsi_device *scsidev) +static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { - /* skip commands not addressed to targets we simulate */ - if (unlikely(scsidev->channel || scsidev->lun)) - return NULL; + if (!sata_pmp_attached(ap)) { + if (likely(devno < ata_link_max_devices(&ap->link))) + return &ap->link.device[devno]; + } else { + if (likely(devno < ap->nr_pmp_links)) + return &ap->pmp_link[devno].device[0]; + } - return ata_find_dev(ap, scsidev->id); + return NULL; } -/** - * ata_scsi_dev_enabled - determine if device is enabled - * @dev: ATA device - * - * Determine if commands should be sent to the specified device. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * 0 if commands are not allowed / 1 if commands are allowed - */ - -static int ata_scsi_dev_enabled(struct ata_device *dev) +static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, + const struct scsi_device *scsidev) { - if (unlikely(!ata_dev_enabled(dev))) - return 0; + int devno; - if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) { - if (unlikely(dev->class == ATA_DEV_ATAPI)) { - ata_dev_printk(dev, KERN_WARNING, - "WARNING: ATAPI is %s, device ignored.\n", - atapi_enabled ? 
"not supported with this driver" : "disabled"); - return 0; - } + /* skip commands not addressed to targets we simulate */ + if (!sata_pmp_attached(ap)) { + if (unlikely(scsidev->channel || scsidev->lun)) + return NULL; + devno = scsidev->id; + } else { + if (unlikely(scsidev->id || scsidev->lun)) + return NULL; + devno = scsidev->channel; } - return 1; + return ata_find_dev(ap, devno); } /** @@ -2544,7 +2846,7 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) { struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); - if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) + if (unlikely(!dev || !ata_dev_enabled(dev))) return NULL; return dev; @@ -2561,28 +2863,27 @@ static u8 ata_scsi_map_proto(u8 byte1) { switch((byte1 & 0x1e) >> 1) { - case 3: /* Non-data */ - return ATA_PROT_NODATA; - - case 6: /* DMA */ - return ATA_PROT_DMA; - - case 4: /* PIO Data-in */ - case 5: /* PIO Data-out */ - return ATA_PROT_PIO; - - case 10: /* Device Reset */ - case 0: /* Hard Reset */ - case 1: /* SRST */ - case 2: /* Bus Idle */ - case 7: /* Packet */ - case 8: /* DMA Queued */ - case 9: /* Device Diagnostic */ - case 11: /* UDMA Data-in */ - case 12: /* UDMA Data-Out */ - case 13: /* FPDMA */ - default: /* Reserved */ - break; + case 3: /* Non-data */ + return ATA_PROT_NODATA; + + case 6: /* DMA */ + case 10: /* UDMA Data-in */ + case 11: /* UDMA Data-Out */ + return ATA_PROT_DMA; + + case 4: /* PIO Data-in */ + case 5: /* PIO Data-out */ + return ATA_PROT_PIO; + + case 0: /* Hard Reset */ + case 1: /* SRST */ + case 8: /* Device Diagnostic */ + case 9: /* Device Reset */ + case 7: /* DMA Queued */ + case 12: /* FPDMA */ + case 15: /* Return Response Info */ + default: /* Reserved */ + break; } return ATA_PROT_UNKNOWN; @@ -2591,47 +2892,38 @@ ata_scsi_map_proto(u8 byte1) /** * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile * @qc: command structure to be initialized - * @scsicmd: SCSI command to convert * * Handles either 12 or 16-byte versions of the CDB. * * RETURNS: * Zero on success, non-zero on failure. */ -static unsigned int -ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd) +static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &(qc->tf); - struct scsi_cmnd *cmd = qc->scsicmd; + struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; + const u8 *cdb = scmd->cmnd; - if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN) - goto invalid_fld; - - /* We may not issue DMA commands if no DMA mode is set */ - if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) - goto invalid_fld; - - if (scsicmd[1] & 0xe0) - /* PIO multi not supported yet */ + if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) goto invalid_fld; /* * 12 and 16 byte CDBs use different offsets to * provide the various register values. */ - if (scsicmd[0] == ATA_16) { + if (cdb[0] == ATA_16) { /* * 16-byte CDB - may contain extended commands. * * If that is the case, copy the upper byte register values. 
*/ - if (scsicmd[1] & 0x01) { - tf->hob_feature = scsicmd[3]; - tf->hob_nsect = scsicmd[5]; - tf->hob_lbal = scsicmd[7]; - tf->hob_lbam = scsicmd[9]; - tf->hob_lbah = scsicmd[11]; + if (cdb[1] & 0x01) { + tf->hob_feature = cdb[3]; + tf->hob_nsect = cdb[5]; + tf->hob_lbal = cdb[7]; + tf->hob_lbam = cdb[9]; + tf->hob_lbah = cdb[11]; tf->flags |= ATA_TFLAG_LBA48; } else tf->flags &= ~ATA_TFLAG_LBA48; @@ -2639,33 +2931,119 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd) /* * Always copy low byte, device and command registers. */ - tf->feature = scsicmd[4]; - tf->nsect = scsicmd[6]; - tf->lbal = scsicmd[8]; - tf->lbam = scsicmd[10]; - tf->lbah = scsicmd[12]; - tf->device = scsicmd[13]; - tf->command = scsicmd[14]; + tf->feature = cdb[4]; + tf->nsect = cdb[6]; + tf->lbal = cdb[8]; + tf->lbam = cdb[10]; + tf->lbah = cdb[12]; + tf->device = cdb[13]; + tf->command = cdb[14]; } else { /* * 12-byte CDB - incapable of extended commands. */ tf->flags &= ~ATA_TFLAG_LBA48; - tf->feature = scsicmd[3]; - tf->nsect = scsicmd[4]; - tf->lbal = scsicmd[5]; - tf->lbam = scsicmd[6]; - tf->lbah = scsicmd[7]; - tf->device = scsicmd[8]; - tf->command = scsicmd[9]; + tf->feature = cdb[3]; + tf->nsect = cdb[4]; + tf->lbal = cdb[5]; + tf->lbam = cdb[6]; + tf->lbah = cdb[7]; + tf->device = cdb[8]; + tf->command = cdb[9]; + } + + /* enforce correct master/slave bit */ + tf->device = dev->devno ? + tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; + + switch (tf->command) { + /* READ/WRITE LONG use a non-standard sect_size */ + case ATA_CMD_READ_LONG: + case ATA_CMD_READ_LONG_ONCE: + case ATA_CMD_WRITE_LONG: + case ATA_CMD_WRITE_LONG_ONCE: + if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) + goto invalid_fld; + qc->sect_size = scsi_bufflen(scmd); + break; + + /* commands using reported Logical Block size (e.g. 512 or 4K) */ + case ATA_CMD_CFA_WRITE_NE: + case ATA_CMD_CFA_TRANS_SECT: + case ATA_CMD_CFA_WRITE_MULT_NE: + /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */ + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_QUEUED: + /* XXX: case ATA_CMD_READ_QUEUED_EXT: */ + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_MULTI: + case ATA_CMD_READ_MULTI_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_READ_STREAM_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_QUEUED_FUA_EXT: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_MULTI: + case ATA_CMD_WRITE_MULTI_EXT: + case ATA_CMD_WRITE_MULTI_FUA_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: + case ATA_CMD_WRITE_STREAM_EXT: + qc->sect_size = scmd->device->sector_size; + break; + + /* Everything else uses 512 byte "sectors" */ + default: + qc->sect_size = ATA_SECT_SIZE; } + /* - * If slave is possible, enforce correct master/slave bit - */ - if (qc->ap->flags & ATA_FLAG_SLAVE_POSS) - tf->device = qc->dev->devno ? - tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; + * Set flags so that all registers will be written, pass on + * write indication (used for PIO/DMA setup), result TF is + * copied back and we don't whine too much about its failure. + */ + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + if (scmd->sc_data_direction == DMA_TO_DEVICE) + tf->flags |= ATA_TFLAG_WRITE; + + qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; + + /* + * Set transfer length. 
+ * + * TODO: find out if we need to do more here to + * cover scatter/gather case. + */ + ata_qc_set_pc_nbytes(qc); + + /* We may not issue DMA commands if no DMA mode is set */ + if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) + goto invalid_fld; + + /* sanity check for pio multi commands */ + if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) + goto invalid_fld; + + if (is_multi_taskfile(tf)) { + unsigned int multi_count = 1 << (cdb[1] >> 5); + + /* compare the passed through multi_count + * with the cached multi_count of libata + */ + if (multi_count != dev->multi_count) + ata_dev_warn(dev, "invalid multi_count %u ignored\n", + multi_count); + } /* * Filter SET_FEATURES - XFER MODE command -- otherwise, @@ -2674,40 +3052,284 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd) * controller (i.e. the reason for ->set_piomode(), * ->set_dmamode(), and ->post_set_mode() hooks). */ - if ((tf->command == ATA_CMD_SET_FEATURES) - && (tf->feature == SETFEATURES_XFER)) + if (tf->command == ATA_CMD_SET_FEATURES && + tf->feature == SETFEATURES_XFER) goto invalid_fld; /* - * Set flags so that all registers will be written, - * and pass on write indication (used for PIO/DMA - * setup.) + * Filter TPM commands by default. These provide an + * essentially uncontrolled encrypted "back door" between + * applications and the disk. Set libata.allow_tpm=1 if you + * have a real reason for wanting to use them. This ensures + * that installed software cannot easily mess stuff up without + * user intent. DVR type users will probably ship with this enabled + * for movie content management. + * + * Note that for ATA8 we can issue a DCS change and DCS freeze lock + * for this and should do in future but that it is not sufficient as + * DCS is an optional feature set. Thus we also do the software filter + * so that we comply with the TC consortium stated goal that the user + * can turn off TC features of their system. */ - tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); + if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) + goto invalid_fld; - if (cmd->sc_data_direction == DMA_TO_DEVICE) - tf->flags |= ATA_TFLAG_WRITE; + return 0; + + invalid_fld: + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); + /* "Invalid field in cdb" */ + return 1; +} + +static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *tf = &qc->tf; + struct scsi_cmnd *scmd = qc->scsicmd; + struct ata_device *dev = qc->dev; + const u8 *cdb = scmd->cmnd; + u64 block; + u32 n_block; + u32 size; + void *buf; + + /* we may not issue DMA commands if no DMA mode is set */ + if (unlikely(!dev->dma_mode)) + goto invalid_fld; + + if (unlikely(scmd->cmd_len < 16)) + goto invalid_fld; + scsi_16_lba_len(cdb, &block, &n_block); + + /* for now we only support WRITE SAME with the unmap bit set */ + if (unlikely(!(cdb[1] & 0x8))) + goto invalid_fld; /* - * Set transfer length. - * - * TODO: find out if we need to do more here to - * cover scatter/gather case. + * WRITE SAME always has a sector sized buffer as payload, this + * should never be a multiple entry S/G list. 
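+ * (Hence mapping just the first scatterlist page below is safe.)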
*/ - qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE; + if (!scsi_sg_count(scmd)) + goto invalid_fld; + + buf = page_address(sg_page(scsi_sglist(scmd))); + size = ata_set_lba_range_entries(buf, 512, block, n_block); + + if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { + /* Newer devices support queued TRIM commands */ + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_FPDMA_SEND; + tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; + tf->nsect = qc->tag << 3; + tf->hob_feature = (size / 512) >> 8; + tf->feature = size / 512; + + tf->auxiliary = 1; + } else { + tf->protocol = ATA_PROT_DMA; + tf->hob_feature = 0; + tf->feature = ATA_DSM_TRIM; + tf->hob_nsect = (size / 512) >> 8; + tf->nsect = size / 512; + tf->command = ATA_CMD_DSM; + } - /* request result TF */ - qc->flags |= ATA_QCFLAG_RESULT_TF; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | + ATA_TFLAG_WRITE; + + ata_qc_set_pc_nbytes(qc); return 0; invalid_fld: - ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00); + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); /* "Invalid field in cdb" */ return 1; } /** + * ata_mselect_caching - Simulate MODE SELECT for caching info page + * @qc: Storage for translated ATA taskfile + * @buf: input buffer + * @len: number of valid bytes in the input buffer + * + * Prepare a taskfile to modify caching information for the device. + * + * LOCKING: + * None. + */ +static int ata_mselect_caching(struct ata_queued_cmd *qc, + const u8 *buf, int len) +{ + struct ata_taskfile *tf = &qc->tf; + struct ata_device *dev = qc->dev; + char mpage[CACHE_MPAGE_LEN]; + u8 wce; + + /* + * The first two bytes of def_cache_mpage are a header, so offsets + * in mpage are off by 2 compared to buf. Same for len. + */ + + if (len != CACHE_MPAGE_LEN - 2) + return -EINVAL; + + wce = buf[0] & (1 << 2); + + /* + * Check that read-only bits are not modified. + */ + ata_msense_caching(dev->id, mpage, false); + mpage[2] &= ~(1 << 2); + mpage[2] |= wce; + if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0) + return -EINVAL; + + tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf->protocol = ATA_PROT_NODATA; + tf->nsect = 0; + tf->command = ATA_CMD_SET_FEATURES; + tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; + return 0; +} + +/** + * ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands + * @qc: Storage for translated ATA taskfile + * + * Converts a MODE SELECT command to an ATA SET FEATURES taskfile. + * Assume this is invoked for direct access devices (e.g. disks) only. + * There should be no block descriptor for other device types. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *scmd = qc->scsicmd; + const u8 *cdb = scmd->cmnd; + const u8 *p; + u8 pg, spg; + unsigned six_byte, pg_len, hdr_len, bd_len; + int len; + + VPRINTK("ENTER\n"); + + six_byte = (cdb[0] == MODE_SELECT); + if (six_byte) { + if (scmd->cmd_len < 5) + goto invalid_fld; + + len = cdb[4]; + hdr_len = 4; + } else { + if (scmd->cmd_len < 9) + goto invalid_fld; + + len = (cdb[7] << 8) + cdb[8]; + hdr_len = 8; + } + + /* We only support PF=1, SP=0. */ + if ((cdb[1] & 0x11) != 0x10) + goto invalid_fld; + + /* Test early for possible overrun. */ + if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) + goto invalid_param_len; + + p = page_address(sg_page(scsi_sglist(scmd))); + + /* Move past header and block descriptors. 
*/ + if (len < hdr_len) + goto invalid_param_len; + + if (six_byte) + bd_len = p[3]; + else + bd_len = (p[6] << 8) + p[7]; + + len -= hdr_len; + p += hdr_len; + if (len < bd_len) + goto invalid_param_len; + if (bd_len != 0 && bd_len != 8) + goto invalid_param; + + len -= bd_len; + p += bd_len; + if (len == 0) + goto skip; + + /* Parse both possible formats for the mode page headers. */ + pg = p[0] & 0x3f; + if (p[0] & 0x40) { + if (len < 4) + goto invalid_param_len; + + spg = p[1]; + pg_len = (p[2] << 8) | p[3]; + p += 4; + len -= 4; + } else { + if (len < 2) + goto invalid_param_len; + + spg = 0; + pg_len = p[1]; + p += 2; + len -= 2; + } + + /* + * No mode subpages supported (yet) but asking for _all_ + * subpages may be valid + */ + if (spg && (spg != ALL_SUB_MPAGES)) + goto invalid_param; + if (pg_len > len) + goto invalid_param_len; + + switch (pg) { + case CACHE_MPAGE: + if (ata_mselect_caching(qc, p, pg_len) < 0) + goto invalid_param; + break; + + default: /* invalid page code */ + goto invalid_param; + } + + /* + * Only one page has changeable data, so we only support setting one + * page at a time. + */ + if (len > pg_len) + goto invalid_param; + + return 0; + + invalid_fld: + /* "Invalid field in CDB" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); + return 1; + + invalid_param: + /* "Invalid field in parameter list" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0); + return 1; + + invalid_param_len: + /* "Parameter list length error" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0); + return 1; + + skip: + scmd->result = SAM_STAT_GOOD; + return 1; +} + +/** * ata_get_xlat_func - check if SCSI to ATA translation is possible * @dev: ATA device * @cmd: SCSI command opcode to consider @@ -2731,6 +3353,9 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) case WRITE_16: return ata_scsi_rw_xlat; + case WRITE_SAME_16: + return ata_scsi_write_same_xlat; + case SYNCHRONIZE_CACHE: if (ata_try_flush_cache(dev)) return ata_scsi_flush_xlat; @@ -2744,6 +3369,11 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) case ATA_16: return ata_scsi_pass_thru; + case MODE_SELECT: + case MODE_SELECT_10: + return ata_scsi_mode_select_xlat; + break; + case START_STOP: return ata_scsi_start_stop_xlat; } @@ -2767,7 +3397,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, u8 *scsicmd = cmd->cmnd; DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - ap->id, + ap->print_id, scsidev->channel, scsidev->id, scsidev->lun, scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], @@ -2775,30 +3405,58 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, #endif } -static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *), +static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { + u8 scsi_op = scmd->cmnd[0]; + ata_xlat_func_t xlat_func; int rc = 0; if (dev->class == ATA_DEV_ATA) { - ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, - cmd->cmnd[0]); + if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) + goto bad_cdb_len; - if (xlat_func) - rc = ata_scsi_translate(dev, cmd, done, xlat_func); - else - ata_scsi_simulate(dev, cmd, done); - } else - rc = ata_scsi_translate(dev, cmd, done, atapi_xlat); + xlat_func = ata_get_xlat_func(dev, scsi_op); + } else { + if (unlikely(!scmd->cmd_len)) + goto bad_cdb_len; + + xlat_func = NULL; + if (likely((scsi_op != ATA_16) || 
!atapi_passthru16)) { + /* relay SCSI command to ATAPI device */ + int len = COMMAND_SIZE(scsi_op); + if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) + goto bad_cdb_len; + + xlat_func = atapi_xlat; + } else { + /* ATA_16 passthru, treat as an ATA command */ + if (unlikely(scmd->cmd_len > 16)) + goto bad_cdb_len; + + xlat_func = ata_get_xlat_func(dev, scsi_op); + } + } + + if (xlat_func) + rc = ata_scsi_translate(dev, scmd, xlat_func); + else + ata_scsi_simulate(dev, scmd); return rc; + + bad_cdb_len: + DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", + scmd->cmd_len, scsi_op, dev->cdb_len); + scmd->result = DID_ERROR << 16; + scmd->scsi_done(scmd); + return 0; } /** * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device + * @shost: SCSI host of command to be sent * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * * In some cases, this function translates SCSI commands into * ATA taskfiles, and queues the taskfiles to be sent to @@ -2808,37 +3466,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd, * ATA and ATAPI devices appearing as SCSI devices. * * LOCKING: - * Releases scsi-layer-held lock, and obtains host lock. + * ATA host lock * * RETURNS: * Return value from __ata_scsi_queuecmd() if @cmd can be queued, * 0 otherwise. */ -int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct ata_port *ap; struct ata_device *dev; struct scsi_device *scsidev = cmd->device; - struct Scsi_Host *shost = scsidev->host; int rc = 0; + unsigned long irq_flags; ap = ata_shost_to_port(shost); - spin_unlock(shost->host_lock); - spin_lock(ap->lock); + spin_lock_irqsave(ap->lock, irq_flags); ata_scsi_dump_cdb(ap, cmd); dev = ata_scsi_find_dev(ap, scsidev); if (likely(dev)) - rc = __ata_scsi_queuecmd(cmd, done, dev); + rc = __ata_scsi_queuecmd(cmd, dev); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } - spin_unlock(ap->lock); - spin_lock(shost->host_lock); + spin_unlock_irqrestore(ap->lock, irq_flags); + return rc; } @@ -2846,7 +3503,6 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) * ata_scsi_simulate - simulate SCSI command on ATA device * @dev: the target device * @cmd: SCSI command being sent to device. - * @done: SCSI command completion function. * * Interprets and directly executes a select list of SCSI commands * that can be handled internally. @@ -2855,101 +3511,236 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) * spin_lock_irqsave(host lock) */ -void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) { struct ata_scsi_args args; const u8 *scsicmd = cmd->cmnd; + u8 tmp8; args.dev = dev; args.id = dev->id; args.cmd = cmd; - args.done = done; + args.done = cmd->scsi_done; switch(scsicmd[0]) { - /* no-op's, complete with success */ - case SYNCHRONIZE_CACHE: - case REZERO_UNIT: - case SEEK_6: - case SEEK_10: - case TEST_UNIT_READY: - case FORMAT_UNIT: /* FIXME: correct? */ - case SEND_DIAGNOSTIC: /* FIXME: correct? */ - ata_scsi_rbuf_fill(&args, ata_scsiop_noop); - break; + /* TODO: worth improving? */ + case FORMAT_UNIT: + ata_scsi_invalid_field(cmd); + break; - case INQUIRY: - if (scsicmd[1] & 2) /* is CmdDt set? */ - ata_scsi_invalid_field(cmd, done); - else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); - else if (scsicmd[2] == 0x00) - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); - else if (scsicmd[2] == 0x80) - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); - else if (scsicmd[2] == 0x83) - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); - else - ata_scsi_invalid_field(cmd, done); + case INQUIRY: + if (scsicmd[1] & 2) /* is CmdDt set? */ + ata_scsi_invalid_field(cmd); + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); + else switch (scsicmd[2]) { + case 0x00: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); break; - - case MODE_SENSE: - case MODE_SENSE_10: - ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); + case 0x80: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); break; - - case MODE_SELECT: /* unconditionally return */ - case MODE_SELECT_10: /* bad-field-in-cdb */ - ata_scsi_invalid_field(cmd, done); + case 0x83: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); + break; + case 0x89: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); + break; + case 0xb0: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0); + break; + case 0xb1: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); break; + case 0xb2: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); + break; + default: + ata_scsi_invalid_field(cmd); + break; + } + break; + + case MODE_SENSE: + case MODE_SENSE_10: + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); + break; + + case READ_CAPACITY: + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + break; - case READ_CAPACITY: + case SERVICE_ACTION_IN: + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); - break; + else + ata_scsi_invalid_field(cmd); + break; - case SERVICE_ACTION_IN: - if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) - ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); - else - ata_scsi_invalid_field(cmd, done); - break; + case REPORT_LUNS: + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); + break; - case REPORT_LUNS: - ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); - break; + case REQUEST_SENSE: + ata_scsi_set_sense(cmd, 0, 0, 0); + cmd->result = (DRIVER_SENSE << 24); + cmd->scsi_done(cmd); + break; + + /* if we reach this, then writeback caching is disabled, + * turning this into a no-op. 
+ */ + case SYNCHRONIZE_CACHE: + /* fall through */ + + /* no-ops, complete with success */ + case REZERO_UNIT: + case SEEK_6: + case SEEK_10: + case TEST_UNIT_READY: + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); + break; - /* mandatory commands we haven't implemented yet */ - case REQUEST_SENSE: + case SEND_DIAGNOSTIC: + tmp8 = scsicmd[1] & ~(1 << 3); + if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); + else + ata_scsi_invalid_field(cmd); + break; - /* all other commands */ - default: - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); - /* "Invalid command operation code" */ - done(cmd); - break; + /* all other commands */ + default: + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); + /* "Invalid command operation code" */ + cmd->scsi_done(cmd); + break; } } -void ata_scsi_scan_host(struct ata_port *ap) +int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) { - unsigned int i; + int i, rc; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct Scsi_Host *shost; + + rc = -ENOMEM; + shost = scsi_host_alloc(sht, sizeof(struct ata_port *)); + if (!shost) + goto err_alloc; + + shost->eh_noresume = 1; + *(struct ata_port **)&shost->hostdata[0] = ap; + ap->scsi_host = shost; + + shost->transportt = ata_scsi_transport_template; + shost->unique_id = ap->print_id; + shost->max_id = 16; + shost->max_lun = 1; + shost->max_channel = 1; + shost->max_cmd_len = 16; + shost->no_write_same = 1; + + /* Schedule policy is determined by ->qc_defer() + * callback and it needs to see every deferred qc. + * Set host_blocked to 1 to prevent SCSI midlayer from + * automatically deferring requests. + */ + shost->max_host_blocked = 1; - if (ap->flags & ATA_FLAG_DISABLED) - return; + rc = scsi_add_host_with_dma(ap->scsi_host, + &ap->tdev, ap->host->dev); + if (rc) + goto err_add; + } - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - struct scsi_device *sdev; + return 0; - if (!ata_dev_enabled(dev) || dev->sdev) - continue; + err_add: + scsi_host_put(host->ports[i]->scsi_host); + err_alloc: + while (--i >= 0) { + struct Scsi_Host *shost = host->ports[i]->scsi_host; - sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL); - if (!IS_ERR(sdev)) { - dev->sdev = sdev; - scsi_device_put(sdev); + scsi_remove_host(shost); + scsi_host_put(shost); + } + return rc; +} + +void ata_scsi_scan_host(struct ata_port *ap, int sync) +{ + int tries = 5; + struct ata_device *last_failed_dev = NULL; + struct ata_link *link; + struct ata_device *dev; + + repeat: + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { + struct scsi_device *sdev; + int channel = 0, id = 0; + + if (dev->sdev) + continue; + + if (ata_is_host_link(link)) + id = dev->devno; + else + channel = link->pmp; + + sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, + NULL); + if (!IS_ERR(sdev)) { + dev->sdev = sdev; + scsi_device_put(sdev); + } else { + dev->sdev = NULL; + } + } + } + + /* If we scanned while EH was in progress or allocation + * failure occurred, scan would have failed silently. Check + * whether all devices are attached. + */ + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { + if (!dev->sdev) + goto exit_loop; + } + } + exit_loop: + if (!link) + return; + + /* we're missing some SCSI devices */ + if (sync) { + /* If caller requested synchronous scan && we've made + * any progress, sleep briefly and repeat.
+ */ + if (dev != last_failed_dev) { + msleep(100); + last_failed_dev = dev; + goto repeat; + } + + /* We might be failing to detect boot device, give it + * a few more chances. + */ + if (--tries) { + msleep(100); + goto repeat; + } + + ata_port_err(ap, + "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n"); + } + + queue_delayed_work(system_long_wq, &ap->hotplug_task, + round_jiffies_relative(HZ)); } /** @@ -2988,7 +3779,7 @@ int ata_scsi_offline_dev(struct ata_device *dev) */ static void ata_scsi_remove_dev(struct ata_device *dev) { - struct ata_port *ap = dev->ap; + struct ata_port *ap = dev->link->ap; struct scsi_device *sdev; unsigned long flags; @@ -3027,17 +3818,56 @@ static void ata_scsi_remove_dev(struct ata_device *dev) mutex_unlock(&ap->scsi_host->scan_mutex); if (sdev) { - ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", - sdev->sdev_gendev.bus_id); + ata_dev_info(dev, "detaching (SCSI %s)\n", + dev_name(&sdev->sdev_gendev)); scsi_remove_device(sdev); scsi_device_put(sdev); } } +static void ata_scsi_handle_link_detach(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ata_device *dev; + + ata_for_each_dev(dev, link, ALL) { + unsigned long flags; + + if (!(dev->flags & ATA_DFLAG_DETACHED)) + continue; + + spin_lock_irqsave(ap->lock, flags); + dev->flags &= ~ATA_DFLAG_DETACHED; + spin_unlock_irqrestore(ap->lock, flags); + + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + + ata_scsi_remove_dev(dev); + } +} + +/** + * ata_scsi_media_change_notify - send media change event + * @dev: Pointer to the disk device with media change event + * + * Tell the block layer to send a media change notification + * event. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_scsi_media_change_notify(struct ata_device *dev) +{ + if (dev->sdev) + sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, + GFP_ATOMIC); +} + /** * ata_scsi_hotplug - SCSI part of hotplug - * @data: Pointer to ATA port to perform SCSI hotplug on + * @work: Pointer to ATA port to perform SCSI hotplug on * * Perform SCSI part of hotplug. It's executed from a separate * workqueue after EH completes. This is necessary because SCSI @@ -3047,9 +3877,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev) * LOCKING: * Kernel thread context (may sleep). */ -void ata_scsi_hotplug(void *data) +void ata_scsi_hotplug(struct work_struct *work) { - struct ata_port *ap = data; + struct ata_port *ap = + container_of(work, struct ata_port, hotplug_task.work); int i; if (ap->pflags & ATA_PFLAG_UNLOADING) { @@ -3057,38 +3888,43 @@ void ata_scsi_hotplug(void *data) return; } - DPRINTK("ENTER\n"); - - /* unplug detached devices */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - unsigned long flags; - - if (!(dev->flags & ATA_DFLAG_DETACHED)) - continue; + /* + * XXX - UGLY HACK + * + * The block layer suspend/resume path is fundamentally broken due + * to freezable kthreads and workqueue and may deadlock if a block + * device gets removed while resume is in progress. I don't know + * what the solution is short of removing freezable kthreads and + * workqueues altogether. + * + * The following is an ugly hack to avoid kicking off device + * removal while freezer is active. This is a joke but does avoid + * this particular deadlock scenario. 
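[Editor's note] The synchronous scan above retries only while each pass makes progress, then falls back to the asynchronous hotplug path. A minimal standalone sketch of that progress-tracked retry shape; scan_once(), sleep_ms() and schedule_async_rescan() are hypothetical stand-ins, not libata symbols:

#include <stdbool.h>

extern int scan_once(void);              /* hypothetical: returns #devices still missing */
extern void sleep_ms(unsigned int ms);   /* hypothetical: msleep() stand-in */
extern void schedule_async_rescan(void); /* hypothetical: delayed-work fallback */

void scan_sync_with_fallback(void)
{
    int tries = 5;                 /* extra passes allowed once progress stalls */
    int prev_missing = -1;

    for (;;) {
        int missing = scan_once();

        if (missing == 0)
            return;                        /* every device attached */
        if (missing != prev_missing)
            prev_missing = missing;        /* made progress: keep retrying */
        else if (--tries == 0)
            break;                         /* stalled: stop burning time */
        sleep_ms(100);                     /* mirrors the msleep(100) above */
    }
    schedule_async_rescan();               /* hand off to the async path */
}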
+ * + * https://bugzilla.kernel.org/show_bug.cgi?id=62801 + * http://marc.info/?l=linux-kernel&m=138695698516487 + */ +#ifdef CONFIG_FREEZER + while (pm_freezing) + msleep(10); +#endif - spin_lock_irqsave(ap->lock, flags); - dev->flags &= ~ATA_DFLAG_DETACHED; - spin_unlock_irqrestore(ap->lock, flags); + DPRINTK("ENTER\n"); + mutex_lock(&ap->scsi_scan_mutex); - ata_scsi_remove_dev(dev); - } + /* Unplug detached devices. We cannot use link iterator here + * because PMP links have to be scanned even if PMP is + * currently not attached. Iterate manually. + */ + ata_scsi_handle_link_detach(&ap->link); + if (ap->pmp_link) + for (i = 0; i < SATA_PMP_MAX_PORTS; i++) + ata_scsi_handle_link_detach(&ap->pmp_link[i]); /* scan for new ones */ - ata_scsi_scan_host(ap); - - /* If we scanned while EH was in progress, scan would have - * failed silently. Requeue if there are enabled but - * unattached devices. - */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev) && !dev->sdev) { - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ); - break; - } - } + ata_scsi_scan_host(ap, 0); + mutex_unlock(&ap->scsi_scan_mutex); DPRINTK("EXIT\n"); } @@ -3108,73 +3944,104 @@ void ata_scsi_hotplug(void *data) * RETURNS: * Zero. */ -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun) +int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, unsigned int lun) { struct ata_port *ap = ata_shost_to_port(shost); unsigned long flags; - int rc = 0; + int devno, rc = 0; if (!ap->ops->error_handler) return -EOPNOTSUPP; - if ((channel != SCAN_WILD_CARD && channel != 0) || - (lun != SCAN_WILD_CARD && lun != 0)) + if (lun != SCAN_WILD_CARD && lun) return -EINVAL; + if (!sata_pmp_attached(ap)) { + if (channel != SCAN_WILD_CARD && channel) + return -EINVAL; + devno = id; + } else { + if (id != SCAN_WILD_CARD && id) + return -EINVAL; + devno = channel; + } + spin_lock_irqsave(ap->lock, flags); - if (id == SCAN_WILD_CARD) { - ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1; - ap->eh_info.action |= ATA_EH_SOFTRESET; + if (devno == SCAN_WILD_CARD) { + struct ata_link *link; + + ata_for_each_link(link, ap, EDGE) { + struct ata_eh_info *ehi = &link->eh_info; + ehi->probe_mask |= ATA_ALL_DEVICES; + ehi->action |= ATA_EH_RESET; + } } else { - struct ata_device *dev = ata_find_dev(ap, id); + struct ata_device *dev = ata_find_dev(ap, devno); if (dev) { - ap->eh_info.probe_mask |= 1 << dev->devno; - ap->eh_info.action |= ATA_EH_SOFTRESET; - ap->eh_info.flags |= ATA_EHI_RESUME_LINK; + struct ata_eh_info *ehi = &dev->link->eh_info; + ehi->probe_mask |= 1 << dev->devno; + ehi->action |= ATA_EH_RESET; } else rc = -EINVAL; } - if (rc == 0) + if (rc == 0) { ata_port_schedule_eh(ap); - - spin_unlock_irqrestore(ap->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); + ata_port_wait_eh(ap); + } else + spin_unlock_irqrestore(ap->lock, flags); return rc; } /** * ata_scsi_dev_rescan - initiate scsi_rescan_device() - * @data: Pointer to ATA port to perform scsi_rescan_device() + * @work: Pointer to ATA port to perform scsi_rescan_device() * * After ATA pass thru (SAT) commands are executed successfully, - * libata need to propagate the changes to SCSI layer. This - * function must be executed from ata_aux_wq such that sdev - * attach/detach don't race with rescan. + * libata needs to propagate the changes to the SCSI layer. * * LOCKING: * Kernel thread context (may sleep).
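[Editor's note] ata_scsi_user_scan() above folds the SCSI (channel, id) pair down to one ATA device number, and the meaning of the pair flips when a port multiplier is attached. A standalone sketch of just that mapping; WILD is a stand-in for SCAN_WILD_CARD:

#include <stdbool.h>

#define WILD (~0u)   /* stand-in for SCAN_WILD_CARD */

/* Returns 0 and stores the ATA devno on success, -1 on a bad address. */
static int addr_to_devno(bool pmp_attached, unsigned int channel,
                         unsigned int id, unsigned int *devno)
{
    if (!pmp_attached) {
        if (channel != WILD && channel != 0)
            return -1;      /* only channel 0 exists without a PMP */
        *devno = id;        /* SCSI id selects the ATA device */
    } else {
        if (id != WILD && id != 0)
            return -1;      /* id is fixed behind a port multiplier */
        *devno = channel;   /* SCSI channel selects the PMP link */
    }
    return 0;
}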
*/ -void ata_scsi_dev_rescan(void *data) +void ata_scsi_dev_rescan(struct work_struct *work) { - struct ata_port *ap = data; + struct ata_port *ap = + container_of(work, struct ata_port, scsi_rescan_task); + struct ata_link *link; struct ata_device *dev; - unsigned int i; + unsigned long flags; + + mutex_lock(&ap->scsi_scan_mutex); + spin_lock_irqsave(ap->lock, flags); + + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { + struct scsi_device *sdev = dev->sdev; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - dev = &ap->device[i]; + if (!sdev) + continue; + if (scsi_device_get(sdev)) + continue; - if (ata_dev_enabled(dev) && dev->sdev) - scsi_rescan_device(&(dev->sdev->sdev_gendev)); + spin_unlock_irqrestore(ap->lock, flags); + scsi_rescan_device(&(sdev->sdev_gendev)); + scsi_device_put(sdev); + spin_lock_irqsave(ap->lock, flags); + } } + + spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); } /** * ata_sas_port_alloc - Allocate port for a SAS attached SATA device - * @pdev: PCI device that the scsi device is attached to + * @host: ATA host container for all SAS ports * @port_info: Information from low-level host driver * @shost: SCSI host that the scsi device is attached to * @@ -3189,21 +4056,21 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host, struct ata_port_info *port_info, struct Scsi_Host *shost) { - struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL); - struct ata_probe_ent *ent; + struct ata_port *ap; + ap = ata_port_alloc(host); if (!ap) return NULL; - ent = ata_probe_ent_alloc(host->dev, port_info); - if (!ent) { - kfree(ap); - return NULL; - } + ap->port_no = 0; + ap->lock = &host->lock; + ap->pio_mask = port_info->pio_mask; + ap->mwdma_mask = port_info->mwdma_mask; + ap->udma_mask = port_info->udma_mask; + ap->flags |= port_info->flags; + ap->ops = port_info->port_ops; + ap->cbl = ATA_CBL_SATA; - ata_port_init(ap, host, ent, 0); - ap->lock = shost->host_lock; - kfree(ent); return ap; } EXPORT_SYMBOL_GPL(ata_sas_port_alloc); @@ -3213,7 +4080,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc); * @ap: Port to initialize * * Called just after data structures for each port are - * initialized. Allocates DMA pad. + * initialized. * * May be used as the port_start() entry in ata_port_operations. * @@ -3222,7 +4089,13 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc); */ int ata_sas_port_start(struct ata_port *ap) { - return ata_pad_alloc(ap, ap->dev); + /* + * the port is marked as frozen at allocation time, but if we don't + * have new eh, we won't thaw it + */ + if (!ap->ops->error_handler) + ap->pflags &= ~ATA_PFLAG_FROZEN; + return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_start); @@ -3230,8 +4103,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start); * ata_port_stop - Undo ata_sas_port_start() * @ap: Port to shut down * - * Frees the DMA pad. - * * May be used as the port_stop() entry in ata_port_operations. 
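[Editor's note] ata_scsi_dev_rescan() above uses the classic shape for calling a sleeping function from inside a spinlock-protected walk: pin the object with a reference, drop the lock, do the blocking work, retake the lock. A compressed standalone sketch of that shape; every identifier here is a hypothetical stand-in:

struct obj;
extern struct obj *first_obj(void), *next_obj(struct obj *);
extern int get_ref(struct obj *);            /* nonzero if object is dying */
extern void put_ref(struct obj *);
extern void blocking_rescan(struct obj *);   /* may sleep */
extern void lock(void), unlock(void);

void rescan_all(void)
{
    struct obj *o;

    lock();
    for (o = first_obj(); o; o = next_obj(o)) {
        if (get_ref(o))
            continue;            /* object going away: skip it */
        unlock();                /* can't sleep while holding the lock */
        blocking_rescan(o);
        put_ref(o);
        lock();                  /* retake before touching the list again */
    }
    unlock();
}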
* * LOCKING: @@ -3240,11 +4111,30 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start); void ata_sas_port_stop(struct ata_port *ap) { - ata_pad_free(ap, ap->dev); } EXPORT_SYMBOL_GPL(ata_sas_port_stop); /** + * ata_sas_async_probe - simply schedule probing and return + * @ap: Port to probe + * + * For batch scheduling of probe for sas attached ata devices, assumes + * the port has already been through ata_sas_port_init() + */ +void ata_sas_async_probe(struct ata_port *ap) +{ + __ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_async_probe); + +int ata_sas_sync_probe(struct ata_port *ap) +{ + return ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_sync_probe); + + +/** * ata_sas_port_init - Initialize a SATA device * @ap: SATA port to initialize * @@ -3259,10 +4149,10 @@ int ata_sas_port_init(struct ata_port *ap) { int rc = ap->ops->port_start(ap); - if (!rc) - rc = ata_bus_probe(ap); - - return rc; + if (rc) + return rc; + ap->print_id = atomic_inc_return(&ata_print_id); + return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_init); @@ -3274,7 +4164,8 @@ EXPORT_SYMBOL_GPL(ata_sas_port_init); void ata_sas_port_destroy(struct ata_port *ap) { - ap->ops->port_stop(ap); + if (ap->ops->port_stop) + ap->ops->port_stop(ap); kfree(ap); } EXPORT_SYMBOL_GPL(ata_sas_port_destroy); @@ -3291,7 +4182,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_destroy); int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) { ata_scsi_sdev_config(sdev); - ata_scsi_dev_config(sdev, ap->device); + ata_scsi_dev_config(sdev, ap->link.device); return 0; } EXPORT_SYMBOL_GPL(ata_sas_slave_configure); @@ -3299,24 +4190,25 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure); /** * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * @ap: ATA port to which the command is being sent * * RETURNS: - * Zero. + * Return value from __ata_scsi_queuecmd() if @cmd can be queued, + * 0 otherwise. */ -int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), - struct ata_port *ap) +int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) { + int rc = 0; + ata_scsi_dump_cdb(ap, cmd); - if (likely(ata_scsi_dev_enabled(ap->device))) - __ata_scsi_queuecmd(cmd, done, ap->device); + if (likely(ata_dev_enabled(ap->link.device))) + rc = __ata_scsi_queuecmd(cmd, ap->link.device); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } - return 0; + return rc; } EXPORT_SYMBOL_GPL(ata_sas_queuecmd); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 08b3a407473..1121153f1ec 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1,7 +1,7 @@ /* - * libata-bmdma.c - helper library for PCI IDE BMDMA + * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
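[Editor's note] Taken together, the SAS helpers above suggest a natural port bring-up order for a SAS low-level driver. A hedged sketch of that sequence in kernel style, assuming <linux/libata.h> context; my_sas_ata_attach() is hypothetical and error handling is trimmed, so this is not a verbatim driver:

static struct ata_port *my_sas_ata_attach(struct ata_host *host,
					  struct ata_port_info *pi,
					  struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_sas_port_alloc(host, pi, shost);

	if (!ap)
		return NULL;
	if (ata_sas_port_init(ap)) {	/* runs port_start, assigns print_id */
		ata_sas_port_destroy(ap);
		return NULL;
	}
	ata_sas_async_probe(ap);	/* schedule probing, don't wait for it */
	return ap;
}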
* @@ -33,97 +33,402 @@ */ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/pci.h> +#include <linux/module.h> #include <linux/libata.h> +#include <linux/highmem.h> #include "libata.h" +static struct workqueue_struct *ata_sff_wq; + +const struct ata_port_operations ata_sff_port_ops = { + .inherits = &ata_base_port_ops, + + .qc_prep = ata_noop_qc_prep, + .qc_issue = ata_sff_qc_issue, + .qc_fill_rtf = ata_sff_qc_fill_rtf, + + .freeze = ata_sff_freeze, + .thaw = ata_sff_thaw, + .prereset = ata_sff_prereset, + .softreset = ata_sff_softreset, + .hardreset = sata_sff_hardreset, + .postreset = ata_sff_postreset, + .error_handler = ata_sff_error_handler, + + .sff_dev_select = ata_sff_dev_select, + .sff_check_status = ata_sff_check_status, + .sff_tf_load = ata_sff_tf_load, + .sff_tf_read = ata_sff_tf_read, + .sff_exec_command = ata_sff_exec_command, + .sff_data_xfer = ata_sff_data_xfer, + .sff_drain_fifo = ata_sff_drain_fifo, + + .lost_interrupt = ata_sff_lost_interrupt, +}; +EXPORT_SYMBOL_GPL(ata_sff_port_ops); + /** - * ata_tf_load_pio - send taskfile registers to host controller - * @ap: Port to which output is sent - * @tf: ATA taskfile register set + * ata_sff_check_status - Read device status reg & clear interrupt + * @ap: port where the device is * - * Outputs ATA taskfile to standard ATA host controller. + * Reads ATA taskfile status register for currently-selected device + * and return its value. This also clears pending interrupts + * from this device * * LOCKING: * Inherited from caller. */ +u8 ata_sff_check_status(struct ata_port *ap) +{ + return ioread8(ap->ioaddr.status_addr); +} +EXPORT_SYMBOL_GPL(ata_sff_check_status); -static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf) +/** + * ata_sff_altstatus - Read device alternate status reg + * @ap: port where the device is + * + * Reads ATA taskfile alternate status register for + * currently-selected device and return its value. + * + * Note: may NOT be used as the check_altstatus() entry in + * ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +static u8 ata_sff_altstatus(struct ata_port *ap) { - struct ata_ioports *ioaddr = &ap->ioaddr; - unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + if (ap->ops->sff_check_altstatus) + return ap->ops->sff_check_altstatus(ap); - if (tf->ctl != ap->last_ctl) { - outb(tf->ctl, ioaddr->ctl_addr); - ap->last_ctl = tf->ctl; - ata_wait_idle(ap); + return ioread8(ap->ioaddr.altstatus_addr); +} + +/** + * ata_sff_irq_status - Check if the device is busy + * @ap: port where the device is + * + * Determine if the port is currently busy. Uses altstatus + * if available in order to avoid clearing shared IRQ status + * when finding an IRQ source. Non ctl capable devices don't + * share interrupt lines fortunately for us. + * + * LOCKING: + * Inherited from caller. 
+ */ +static u8 ata_sff_irq_status(struct ata_port *ap) +{ + u8 status; + + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + status = ata_sff_altstatus(ap); + /* Not us: We are busy */ + if (status & ATA_BUSY) + return status; } + /* Clear INTRQ latch */ + status = ap->ops->sff_check_status(ap); + return status; +} - if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { - outb(tf->hob_feature, ioaddr->feature_addr); - outb(tf->hob_nsect, ioaddr->nsect_addr); - outb(tf->hob_lbal, ioaddr->lbal_addr); - outb(tf->hob_lbam, ioaddr->lbam_addr); - outb(tf->hob_lbah, ioaddr->lbah_addr); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); +/** + * ata_sff_sync - Flush writes + * @ap: Port to wait for. + * + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + * + * LOCKING: + * Inherited from caller. + */ + +static void ata_sff_sync(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus) + ap->ops->sff_check_altstatus(ap); + else if (ap->ioaddr.altstatus_addr) + ioread8(ap->ioaddr.altstatus_addr); +} + +/** + * ata_sff_pause - Flush writes and wait 400nS + * @ap: Port to pause for. + * + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_sff_pause(struct ata_port *ap) +{ + ata_sff_sync(ap); + ndelay(400); +} +EXPORT_SYMBOL_GPL(ata_sff_pause); + +/** + * ata_sff_dma_pause - Pause before commencing DMA + * @ap: Port to pause for. + * + * Perform I/O fencing and ensure sufficient cycle delays occur + * for the HDMA1:0 transition + */ + +void ata_sff_dma_pause(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + /* An altstatus read will cause the needed delay without + messing up the IRQ status */ + ata_sff_altstatus(ap); + return; } + /* There are no DMA controllers without ctl. BUG here to ensure + we never violate the HDMA1:0 transition timing and risk + corruption. */ + BUG(); +} +EXPORT_SYMBOL_GPL(ata_sff_dma_pause); - if (is_addr) { - outb(tf->feature, ioaddr->feature_addr); - outb(tf->nsect, ioaddr->nsect_addr); - outb(tf->lbal, ioaddr->lbal_addr); - outb(tf->lbam, ioaddr->lbam_addr); - outb(tf->lbah, ioaddr->lbah_addr); - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); +/** + * ata_sff_busy_sleep - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @tmout_pat: impatience timeout in msecs + * @tmout: overall timeout in msecs + * + * Sleep until ATA Status register bit BSY clears, + * or a timeout occurs. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
+ */ +int ata_sff_busy_sleep(struct ata_port *ap, + unsigned long tmout_pat, unsigned long tmout) +{ + unsigned long timer_start, timeout; + u8 status; + + status = ata_sff_busy_wait(ap, ATA_BUSY, 300); + timer_start = jiffies; + timeout = ata_deadline(timer_start, tmout_pat); + while (status != 0xff && (status & ATA_BUSY) && + time_before(jiffies, timeout)) { + ata_msleep(ap, 50); + status = ata_sff_busy_wait(ap, ATA_BUSY, 3); } - if (tf->flags & ATA_TFLAG_DEVICE) { - outb(tf->device, ioaddr->device_addr); - VPRINTK("device 0x%X\n", tf->device); + if (status != 0xff && (status & ATA_BUSY)) + ata_port_warn(ap, + "port is slow to respond, please be patient (Status 0x%x)\n", + status); + + timeout = ata_deadline(timer_start, tmout); + while (status != 0xff && (status & ATA_BUSY) && + time_before(jiffies, timeout)) { + ata_msleep(ap, 50); + status = ap->ops->sff_check_status(ap); + } + + if (status == 0xff) + return -ENODEV; + + if (status & ATA_BUSY) { + ata_port_err(ap, + "port failed to respond (%lu secs, Status 0x%x)\n", + DIV_ROUND_UP(tmout, 1000), status); + return -EBUSY; } + return 0; +} +EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); + +static int ata_sff_check_ready(struct ata_link *link) +{ + u8 status = link->ap->ops->sff_check_status(link->ap); + + return ata_check_ready(status); +} + +/** + * ata_sff_wait_ready - sleep until BSY clears, or timeout + * @link: SFF link to wait ready status for + * @deadline: deadline jiffies for the operation + * + * Sleep until ATA Status register bit BSY clears, or timeout + * occurs. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) +{ + return ata_wait_ready(link, deadline, ata_sff_check_ready); +} +EXPORT_SYMBOL_GPL(ata_sff_wait_ready); + +/** + * ata_sff_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + * + * Writes ATA taskfile device control register. + * + * Note: may NOT be used as the sff_set_devctl() entry in + * ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) +{ + if (ap->ops->sff_set_devctl) + ap->ops->sff_set_devctl(ap, ctl); + else + iowrite8(ctl, ap->ioaddr.ctl_addr); +} + +/** + * ata_sff_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * + * Use the method defined in the ATA specification to + * make either device 0, or device 1, active on the + * ATA channel. Works with both PIO and MMIO. + * + * May be used as the dev_select() entry in ata_port_operations. + * + * LOCKING: + * caller. + */ +void ata_sff_dev_select(struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + iowrite8(tmp, ap->ioaddr.device_addr); + ata_sff_pause(ap); /* needed; also flushes, for mmio */ +} +EXPORT_SYMBOL_GPL(ata_sff_dev_select); + +/** + * ata_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * @wait: non-zero to wait for Status register BSY bit to clear + * @can_sleep: non-zero if context allows sleeping + * + * Use the method defined in the ATA specification to + * make either device 0, or device 1, active on the + * ATA channel. 
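[Editor's note] The value ata_sff_dev_select() writes below is worth unpacking: ATA_DEVICE_OBS (0xA0) sets the two obsolete always-one bits of the device register, and ATA_DEV1 (0x10) is the DEV bit that picks device 1 over device 0. A standalone sketch of the encoding; the MY_* constants mirror the libata values:

#include <stdint.h>

#define MY_DEVICE_OBS 0xA0u   /* matches ATA_DEVICE_OBS: bits 7 and 5 */
#define MY_DEV1       0x10u   /* matches ATA_DEV1: the DEV select bit */

static uint8_t dev_select_val(unsigned int device)
{
    return MY_DEVICE_OBS | (device ? MY_DEV1 : 0);
}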
+ * + * This is a high-level version of ata_sff_dev_select(), which + * additionally provides the services of inserting the proper + * pauses and status polling, where needed. + * + * LOCKING: + * caller. + */ +static void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + if (ata_msg_probe(ap)) + ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n", + device, wait); + + if (wait) + ata_wait_idle(ap); + + ap->ops->sff_dev_select(ap, device); + + if (wait) { + if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) + ata_msleep(ap, 150); + ata_wait_idle(ap); + } +} + +/** + * ata_sff_irq_on - Enable interrupts on a port. + * @ap: Port on which interrupts are enabled. + * + * Enable interrupts on a legacy IDE device using MMIO or PIO, + * wait for idle, clear any pending interrupts. + * + * Note: may NOT be used as the sff_irq_on() entry in + * ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +void ata_sff_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + if (ap->ops->sff_irq_on) { + ap->ops->sff_irq_on(ap); + return; + } + + ap->ctl &= ~ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); ata_wait_idle(ap); + + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } +EXPORT_SYMBOL_GPL(ata_sff_irq_on); /** - * ata_tf_load_mmio - send taskfile registers to host controller + * ata_sff_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set * - * Outputs ATA taskfile to standard ATA host controller using MMIO. + * Outputs ATA taskfile to standard ATA host controller. * * LOCKING: * Inherited from caller. 
*/ - -static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) +void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; if (tf->ctl != ap->last_ctl) { - writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr); + if (ioaddr->ctl_addr) + iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { - writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr); - writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr); - writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr); - writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr); - writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr); + WARN_ON_ONCE(!ioaddr->ctl_addr); + iowrite8(tf->hob_feature, ioaddr->feature_addr); + iowrite8(tf->hob_nsect, ioaddr->nsect_addr); + iowrite8(tf->hob_lbal, ioaddr->lbal_addr); + iowrite8(tf->hob_lbam, ioaddr->lbam_addr); + iowrite8(tf->hob_lbah, ioaddr->lbah_addr); VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", tf->hob_feature, tf->hob_nsect, @@ -133,11 +438,11 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) } if (is_addr) { - writeb(tf->feature, (void __iomem *) ioaddr->feature_addr); - writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr); - writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr); - writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr); - writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr); + iowrite8(tf->feature, ioaddr->feature_addr); + iowrite8(tf->nsect, ioaddr->nsect_addr); + iowrite8(tf->lbal, ioaddr->lbal_addr); + iowrite8(tf->lbam, ioaddr->lbam_addr); + iowrite8(tf->lbah, ioaddr->lbah_addr); VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", tf->feature, tf->nsect, @@ -147,788 +452,2118 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) } if (tf->flags & ATA_TFLAG_DEVICE) { - writeb(tf->device, (void __iomem *) ioaddr->device_addr); + iowrite8(tf->device, ioaddr->device_addr); VPRINTK("device 0x%X\n", tf->device); } ata_wait_idle(ap); } - +EXPORT_SYMBOL_GPL(ata_sff_tf_load); /** - * ata_tf_load - send taskfile registers to host controller - * @ap: Port to which output is sent - * @tf: ATA taskfile register set - * - * Outputs ATA taskfile to standard ATA host controller using MMIO - * or PIO as indicated by the ATA_FLAG_MMIO flag. - * Writes the control, feature, nsect, lbal, lbam, and lbah registers. - * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, - * hob_lbal, hob_lbam, and hob_lbah. - * - * This function waits for idle (!BUSY and !DRQ) after writing - * registers. If the control register has a new value, this - * function also waits for idle after writing control and before - * writing the remaining registers. + * ata_sff_tf_read - input device's ATA taskfile shadow registers + * @ap: Port from which input is read + * @tf: ATA taskfile register set for storing input * - * May be used as the tf_load() entry in ata_port_operations. + * Reads ATA taskfile registers for currently-selected device + * into @tf. Assumes the device has a fully SFF compliant task file + * layout and behaviour. If your device does not (e.g. has a different + * status method) then you will need to provide a replacement tf_read. * * LOCKING: * Inherited from caller.
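[Editor's note] The hob (high order byte) writes in ata_sff_tf_load() above are the LBA48 half of the taskfile: the 48-bit LBA and 16-bit sector count are split byte-wise across two passes over the same registers. A standalone sketch of the fan-out; struct tf48 is a hypothetical mirror of the relevant ata_taskfile fields:

#include <stdint.h>

struct tf48 {
    uint8_t hob_nsect, hob_lbal, hob_lbam, hob_lbah;
    uint8_t nsect, lbal, lbam, lbah;
};

static void fill_lba48(struct tf48 *tf, uint64_t lba, uint16_t count)
{
    tf->lbal     = lba & 0xff;          /* LBA bits  7:0  */
    tf->lbam     = (lba >> 8) & 0xff;   /* LBA bits 15:8  */
    tf->lbah     = (lba >> 16) & 0xff;  /* LBA bits 23:16 */
    tf->hob_lbal = (lba >> 24) & 0xff;  /* LBA bits 31:24 */
    tf->hob_lbam = (lba >> 32) & 0xff;  /* LBA bits 39:32 */
    tf->hob_lbah = (lba >> 40) & 0xff;  /* LBA bits 47:40 */
    tf->nsect     = count & 0xff;       /* count bits  7:0 */
    tf->hob_nsect = (count >> 8) & 0xff;/* count bits 15:8 */
}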
*/ -void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { - if (ap->flags & ATA_FLAG_MMIO) - ata_tf_load_mmio(ap, tf); - else - ata_tf_load_pio(ap, tf); + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->command = ata_sff_check_status(ap); + tf->feature = ioread8(ioaddr->error_addr); + tf->nsect = ioread8(ioaddr->nsect_addr); + tf->lbal = ioread8(ioaddr->lbal_addr); + tf->lbam = ioread8(ioaddr->lbam_addr); + tf->lbah = ioread8(ioaddr->lbah_addr); + tf->device = ioread8(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + if (likely(ioaddr->ctl_addr)) { + iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = ioread8(ioaddr->error_addr); + tf->hob_nsect = ioread8(ioaddr->nsect_addr); + tf->hob_lbal = ioread8(ioaddr->lbal_addr); + tf->hob_lbam = ioread8(ioaddr->lbam_addr); + tf->hob_lbah = ioread8(ioaddr->lbah_addr); + iowrite8(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + } else + WARN_ON_ONCE(1); + } } +EXPORT_SYMBOL_GPL(ata_sff_tf_read); /** - * ata_exec_command_pio - issue ATA command to host controller + * ata_sff_exec_command - issue ATA command to host controller * @ap: port to which command is being issued * @tf: ATA taskfile register set * - * Issues PIO write to ATA command register, with proper - * synchronization with interrupt handler / other threads. + * Issues ATA command, with proper synchronization with interrupt + * handler / other threads. * * LOCKING: * spin_lock_irqsave(host lock) */ - -static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) +void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - outb(tf->command, ap->ioaddr.command_addr); - ata_pause(ap); + iowrite8(tf->command, ap->ioaddr.command_addr); + ata_sff_pause(ap); } - +EXPORT_SYMBOL_GPL(ata_sff_exec_command); /** - * ata_exec_command_mmio - issue ATA command to host controller + * ata_tf_to_host - issue ATA taskfile to host controller * @ap: port to which command is being issued * @tf: ATA taskfile register set * - * Issues MMIO write to ATA command register, with proper - * synchronization with interrupt handler / other threads. - * - * FIXME: missing write posting for 400nS delay enforcement + * Issues ATA taskfile register set to ATA host controller, + * with proper synchronization with interrupt handler and + * other threads. * * LOCKING: * spin_lock_irqsave(host lock) */ - -static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) +static inline void ata_tf_to_host(struct ata_port *ap, + const struct ata_taskfile *tf) { - DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); - - writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr); - ata_pause(ap); + ap->ops->sff_tf_load(ap, tf); + ap->ops->sff_exec_command(ap, tf); } - /** - * ata_exec_command - issue ATA command to host controller - * @ap: port to which command is being issued - * @tf: ATA taskfile register set + * ata_sff_data_xfer - Transfer data by PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write * - * Issues PIO/MMIO write to ATA command register, with proper - * synchronization with interrupt handler / other threads. + * Transfer data from/to the device data register by PIO. * * LOCKING: - * spin_lock_irqsave(host lock) + * Inherited from caller. + * + * RETURNS: + * Bytes consumed. 
*/ -void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) +unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) { - if (ap->flags & ATA_FLAG_MMIO) - ata_exec_command_mmio(ap, tf); + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 1; + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + ioread16_rep(data_addr, buf, words); else - ata_exec_command_pio(ap, tf); + iowrite16_rep(data_addr, buf, words); + + /* Transfer trailing byte, if any. */ + if (unlikely(buflen & 0x01)) { + unsigned char pad[2] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - 1; + + /* + * Use io*16_rep() accessors here as well to avoid pointlessly + * swapping bytes to and from on the big endian machines... + */ + if (rw == READ) { + ioread16_rep(data_addr, pad, 1); + *buf = pad[0]; + } else { + pad[0] = *buf; + iowrite16_rep(data_addr, pad, 1); + } + words++; + } + + return words << 1; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer); /** - * ata_tf_read_pio - input device's ATA taskfile shadow registers - * @ap: Port from which input is read - * @tf: ATA taskfile register set for storing input + * ata_sff_data_xfer32 - Transfer data by PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write * - * Reads ATA taskfile registers for currently-selected device - * into @tf. + * Transfer data from/to the device data register by PIO using 32bit + * I/O operations. * * LOCKING: * Inherited from caller. + * + * RETURNS: + * Bytes consumed. */ -static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) +unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) { - struct ata_ioports *ioaddr = &ap->ioaddr; + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 2; + int slop = buflen & 3; - tf->command = ata_check_status(ap); - tf->feature = inb(ioaddr->error_addr); - tf->nsect = inb(ioaddr->nsect_addr); - tf->lbal = inb(ioaddr->lbal_addr); - tf->lbam = inb(ioaddr->lbam_addr); - tf->lbah = inb(ioaddr->lbah_addr); - tf->device = inb(ioaddr->device_addr); + if (!(ap->pflags & ATA_PFLAG_PIO32)) + return ata_sff_data_xfer(dev, buf, buflen, rw); - if (tf->flags & ATA_TFLAG_LBA48) { - outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); - tf->hob_feature = inb(ioaddr->error_addr); - tf->hob_nsect = inb(ioaddr->nsect_addr); - tf->hob_lbal = inb(ioaddr->lbal_addr); - tf->hob_lbam = inb(ioaddr->lbam_addr); - tf->hob_lbah = inb(ioaddr->lbah_addr); + /* Transfer multiple of 4 bytes */ + if (rw == READ) + ioread32_rep(data_addr, buf, words); + else + iowrite32_rep(data_addr, buf, words); + + /* Transfer trailing bytes, if any */ + if (unlikely(slop)) { + unsigned char pad[4] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - slop; + + /* + * Use io*_rep() accessors here as well to avoid pointlessly + * swapping bytes to and from on the big endian machines... 
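[Editor's note] The trailing-byte handling in the PIO transfer routines above exists because the data register is only 16 (or 32) bits wide: an odd tail is staged through a small aligned pad so the device always sees full-width accesses, and the string (_rep) accessors are used precisely because they move bytes as-is with no endian conversion. A standalone sketch of just the staging step, pure buffer math with no MMIO:

#include <stdint.h>
#include <string.h>

/* Stage the odd tail of buf (buflen bytes) into pad so one full-width
 * device access can move it; returns the number of tail bytes staged.
 * unit is the access width in bytes (2 or 4). */
static unsigned int stage_tail(uint8_t *pad, const uint8_t *buf,
                               unsigned int buflen, unsigned int unit)
{
    unsigned int slop = buflen & (unit - 1);

    memset(pad, 0, unit);                     /* don't leak stale bytes */
    memcpy(pad, buf + (buflen - slop), slop); /* tail -> bounce pad */
    return slop;
}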
+ */ + if (rw == READ) { + if (slop < 3) + ioread16_rep(data_addr, pad, 1); + else + ioread32_rep(data_addr, pad, 1); + memcpy(buf, pad, slop); + } else { + memcpy(pad, buf, slop); + if (slop < 3) + iowrite16_rep(data_addr, pad, 1); + else + iowrite32_rep(data_addr, pad, 1); + } } + return (buflen + 1) & ~1; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); /** - * ata_tf_read_mmio - input device's ATA taskfile shadow registers - * @ap: Port from which input is read - * @tf: ATA taskfile register set for storing input + * ata_sff_data_xfer_noirq - Transfer data by PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write * - * Reads ATA taskfile registers for currently-selected device - * into @tf via MMIO. + * Transfer data from/to the device data register by PIO. Do the + * transfer with interrupts disabled. * * LOCKING: * Inherited from caller. + * + * RETURNS: + * Bytes consumed. */ - -static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) +unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) { - struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned long flags; + unsigned int consumed; - tf->command = ata_check_status(ap); - tf->feature = readb((void __iomem *)ioaddr->error_addr); - tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); - tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); - tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); - tf->lbah = readb((void __iomem *)ioaddr->lbah_addr); - tf->device = readb((void __iomem *)ioaddr->device_addr); + local_irq_save(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); + local_irq_restore(flags); - if (tf->flags & ATA_TFLAG_LBA48) { - writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr); - tf->hob_feature = readb((void __iomem *)ioaddr->error_addr); - tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr); - tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr); - tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr); - tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr); - } + return consumed; } - +EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); /** - * ata_tf_read - input device's ATA taskfile shadow registers - * @ap: Port from which input is read - * @tf: ATA taskfile register set for storing input - * - * Reads ATA taskfile registers for currently-selected device - * into @tf. + * ata_pio_sector - Transfer a sector of data. + * @qc: Command on going * - * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48 - * is set, also reads the hob registers. - * - * May be used as the tf_read() entry in ata_port_operations. + * Transfer qc->sect_size bytes of data from/to the ATA device. * * LOCKING: * Inherited from caller. */ -void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_pio_sector(struct ata_queued_cmd *qc) { - if (ap->flags & ATA_FLAG_MMIO) - ata_tf_read_mmio(ap, tf); - else - ata_tf_read_pio(ap, tf); + int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); + struct ata_port *ap = qc->ap; + struct page *page; + unsigned int offset; + unsigned char *buf; + + if (qc->curbytes == qc->nbytes - qc->sect_size) + ap->hsm_task_state = HSM_ST_LAST; + + page = sg_page(qc->cursg); + offset = qc->cursg->offset + qc->cursg_ofs; + + /* get the current page and offset */ + page = nth_page(page, (offset >> PAGE_SHIFT)); + offset %= PAGE_SIZE; + + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? 
"write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + /* FIXME: use a bounce buffer */ + local_irq_save(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, + do_write); + + kunmap_atomic(buf); + local_irq_restore(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, + do_write); + } + + if (!do_write && !PageSlab(page)) + flush_dcache_page(page); + + qc->curbytes += qc->sect_size; + qc->cursg_ofs += qc->sect_size; + + if (qc->cursg_ofs == qc->cursg->length) { + qc->cursg = sg_next(qc->cursg); + qc->cursg_ofs = 0; + } } /** - * ata_check_status_pio - Read device status reg & clear interrupt - * @ap: port where the device is + * ata_pio_sectors - Transfer one or many sectors. + * @qc: Command on going * - * Reads ATA taskfile status register for currently-selected device - * and return its value. This also clears pending interrupts - * from this device + * Transfer one or many sectors of data from/to the + * ATA device for the DRQ request. * * LOCKING: * Inherited from caller. */ -static u8 ata_check_status_pio(struct ata_port *ap) +static void ata_pio_sectors(struct ata_queued_cmd *qc) { - return inb(ap->ioaddr.status_addr); + if (is_multi_taskfile(&qc->tf)) { + /* READ/WRITE MULTIPLE */ + unsigned int nsect; + + WARN_ON_ONCE(qc->dev->multi_count == 0); + + nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, + qc->dev->multi_count); + while (nsect--) + ata_pio_sector(qc); + } else + ata_pio_sector(qc); + + ata_sff_sync(qc->ap); /* flush */ } /** - * ata_check_status_mmio - Read device status reg & clear interrupt - * @ap: port where the device is + * atapi_send_cdb - Write CDB bytes to hardware + * @ap: Port to which ATAPI device is attached. + * @qc: Taskfile currently active * - * Reads ATA taskfile status register for currently-selected device - * via MMIO and return its value. This also clears pending interrupts - * from this device + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. * * LOCKING: - * Inherited from caller. + * caller. */ -static u8 ata_check_status_mmio(struct ata_port *ap) +static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) { - return readb((void __iomem *) ap->ioaddr.status_addr); + /* send SCSI cdb */ + DPRINTK("send cdb\n"); + WARN_ON_ONCE(qc->dev->cdb_len < 12); + + ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); + ata_sff_sync(ap); + /* FIXME: If the CDB is for DMA do we need to do the transition delay + or is bmdma_start guaranteed to do it ? */ + switch (qc->tf.protocol) { + case ATAPI_PROT_PIO: + ap->hsm_task_state = HSM_ST; + break; + case ATAPI_PROT_NODATA: + ap->hsm_task_state = HSM_ST_LAST; + break; +#ifdef CONFIG_ATA_BMDMA + case ATAPI_PROT_DMA: + ap->hsm_task_state = HSM_ST_LAST; + /* initiate bmdma */ + ap->ops->bmdma_start(qc); + break; +#endif /* CONFIG_ATA_BMDMA */ + default: + BUG(); + } } - /** - * ata_check_status - Read device status reg & clear interrupt - * @ap: port where the device is + * __atapi_pio_bytes - Transfer data from/to the ATAPI device. + * @qc: Command on going + * @bytes: number of bytes * - * Reads ATA taskfile status register for currently-selected device - * and return its value. This also clears pending interrupts - * from this device - * - * May be used as the check_status() entry in ata_port_operations. + * Transfer Transfer data from/to the ATAPI device. 
* * LOCKING: * Inherited from caller. + * */ -u8 ata_check_status(struct ata_port *ap) +static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) { - if (ap->flags & ATA_FLAG_MMIO) - return ata_check_status_mmio(ap); - return ata_check_status_pio(ap); -} + int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; + struct ata_port *ap = qc->ap; + struct ata_device *dev = qc->dev; + struct ata_eh_info *ehi = &dev->link->eh_info; + struct scatterlist *sg; + struct page *page; + unsigned char *buf; + unsigned int offset, count, consumed; + +next_sg: + sg = qc->cursg; + if (unlikely(!sg)) { + ata_ehi_push_desc(ehi, "unexpected or too much trailing data " + "buf=%u cur=%u bytes=%u", + qc->nbytes, qc->curbytes, bytes); + return -1; + } + + page = sg_page(sg); + offset = sg->offset + qc->cursg_ofs; + /* get the current page and offset */ + page = nth_page(page, (offset >> PAGE_SHIFT)); + offset %= PAGE_SIZE; + + /* don't overrun current sg */ + count = min(sg->length - qc->cursg_ofs, bytes); + + /* don't cross page boundaries */ + count = min(count, (unsigned int)PAGE_SIZE - offset); + + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + /* FIXME: use bounce buffer */ + local_irq_save(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); + + kunmap_atomic(buf); + local_irq_restore(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); + } + + bytes -= min(bytes, consumed); + qc->curbytes += count; + qc->cursg_ofs += count; + + if (qc->cursg_ofs == sg->length) { + qc->cursg = sg_next(qc->cursg); + qc->cursg_ofs = 0; + } + + /* + * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); + * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN + * check correctly as it doesn't know if it is the last request being + * made. Somebody should implement a proper sanity check. + */ + if (bytes) + goto next_sg; + return 0; +} /** - * ata_altstatus - Read device alternate status reg - * @ap: port where the device is - * - * Reads ATA taskfile alternate status register for - * currently-selected device and return its value. + * atapi_pio_bytes - Transfer data from/to the ATAPI device. + * @qc: Command on going * - * Note: may NOT be used as the check_altstatus() entry in - * ata_port_operations. + * Transfer data from/to the ATAPI device. * * LOCKING: * Inherited from caller.
+ */ + ap->ops->sff_tf_read(ap, &qc->result_tf); + ireason = qc->result_tf.nsect; + bc_lo = qc->result_tf.lbam; + bc_hi = qc->result_tf.lbah; + bytes = (bc_hi << 8) | bc_lo; + + /* shall be cleared to zero, indicating xfer of data */ + if (unlikely(ireason & ATAPI_COD)) + goto atapi_check; + + /* make sure transfer direction matches expected */ + i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0; + if (unlikely(do_write != i_write)) + goto atapi_check; + + if (unlikely(!bytes)) + goto atapi_check; + + VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); - if (ap->flags & ATA_FLAG_MMIO) - return readb((void __iomem *)ap->ioaddr.altstatus_addr); - return inb(ap->ioaddr.altstatus_addr); + if (unlikely(__atapi_pio_bytes(qc, bytes))) + goto err_out; + ata_sff_sync(ap); /* flush */ + + return; + + atapi_check: + ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", + ireason, bytes); + err_out: + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; } /** - * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. + * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. + * @ap: the target ata_port + * @qc: qc on going * - * LOCKING: - * spin_lock_irqsave(host lock) + * RETURNS: + * 1 if ok in workqueue, 0 otherwise. */ - -static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) +static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; + if (qc->tf.flags & ATA_TFLAG_POLLING) + return 1; - /* load PRD table addr. */ - mb(); /* make sure PRD table writes are visible to controller */ - writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); + if (ap->hsm_task_state == HSM_ST_FIRST) { + if (qc->tf.protocol == ATA_PROT_PIO && + (qc->tf.flags & ATA_TFLAG_WRITE)) + return 1; - /* specify data direction, triple-check start bit is clear */ - dmactl = readb(mmio + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - writeb(dmactl, mmio + ATA_DMA_CMD); + if (ata_is_atapi(qc->tf.protocol) && + !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + return 1; + } - /* issue r/w command */ - ap->ops->exec_command(ap, &qc->tf); + return 0; } /** - * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. + * ata_hsm_qc_complete - finish a qc running on standard HSM + * @qc: Command to complete + * @in_wq: 1 if called from workqueue, 0 otherwise + * + * Finish @qc which is running on standard HSM. * * LOCKING: - * spin_lock_irqsave(host lock) + * If @in_wq is zero, spin_lock_irqsave(host lock). + * Otherwise, none on entry and grabs host lock. */ - -static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) +static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) { struct ata_port *ap = qc->ap; - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - u8 dmactl; + unsigned long flags; - /* start host DMA transaction */ - dmactl = readb(mmio + ATA_DMA_CMD); - writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); + if (ap->ops->error_handler) { + if (in_wq) { + spin_lock_irqsave(ap->lock, flags); + + /* EH might have kicked in while host lock is + * released. 
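[Editor's note] The ireason checks in atapi_pio_bytes() above follow the ATAPI spec: during a data phase the CoD (command/data) bit must be clear, and the IO bit must agree with the expected direction (IO set means device-to-host). A standalone sketch of that decode; the MY_* constants mirror ATAPI_COD and ATAPI_IO:

#include <stdbool.h>

#define MY_ATAPI_COD 0x01u  /* matches ATAPI_COD: command (1) vs data (0) */
#define MY_ATAPI_IO  0x02u  /* matches ATAPI_IO: device-to-host when set  */

static bool data_phase_ok(unsigned int ireason, bool host_writes)
{
    if (ireason & MY_ATAPI_COD)          /* device still expects a command */
        return false;
    /* IO clear -> host-to-device, i.e. a write from our side */
    return ((ireason & MY_ATAPI_IO) == 0) == host_writes;
}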
+ */ + qc = ata_qc_from_tag(ap, qc->tag); + if (qc) { + if (likely(!(qc->err_mask & AC_ERR_HSM))) { + ata_sff_irq_on(ap); + ata_qc_complete(qc); + } else + ata_port_freeze(ap); + } - /* Strictly, one may wish to issue a readb() here, to - * flush the mmio write. However, control also passes - * to the hardware at this point, and it will interrupt - * us when we are to resume control. So, in effect, - * we don't care when the mmio write flushes. - * Further, a read of the DMA status register _immediately_ - * following the write may not be what certain flaky hardware - * is expected, so I think it is best to not add a readb() - * without first all the MMIO ATA cards/mobos. - * Or maybe I'm just being paranoid. + spin_unlock_irqrestore(ap->lock, flags); + } else { + if (likely(!(qc->err_mask & AC_ERR_HSM))) + ata_qc_complete(qc); + else + ata_port_freeze(ap); + } + } else { + if (in_wq) { + spin_lock_irqsave(ap->lock, flags); + ata_sff_irq_on(ap); + ata_qc_complete(qc); + spin_unlock_irqrestore(ap->lock, flags); + } else + ata_qc_complete(qc); + } +} + +/** + * ata_sff_hsm_move - move the HSM to the next state. + * @ap: the target ata_port + * @qc: qc on going + * @status: current device status + * @in_wq: 1 if called from workqueue, 0 otherwise + * + * RETURNS: + * 1 when poll next status needed, 0 otherwise. + */ +int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq) +{ + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; + unsigned long flags = 0; + int poll_next; + + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* Make sure ata_sff_qc_issue() does not throw things + * like DMA polling into the workqueue. Notice that + * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). */ + WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); + +fsm_start: + DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", + ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); + + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Send first data block or PACKET CDB */ + + /* If polling, we will stay in the work queue after + * sending the data. Otherwise, interrupt handler + * takes over after sending the data. + */ + poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); + + /* check device status */ + if (unlikely((status & ATA_DRQ) == 0)) { + /* handle BSY=0, DRQ=0 as error */ + if (likely(status & (ATA_ERR | ATA_DF))) + /* device stops HSM for abort/error */ + qc->err_mask |= AC_ERR_DEV; + else { + /* HSM violation. Let EH handle this */ + ata_ehi_push_desc(ehi, + "ST_FIRST: !(DRQ|ERR|DF)"); + qc->err_mask |= AC_ERR_HSM; + } + + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* Device should not ask for data transfer (DRQ=1) + * when it finds something wrong. + * We ignore DRQ here and stop the HSM by + * changing hsm_task_state to HSM_ST_ERR and + * let the EH abort the command or reset the device. + */ + if (unlikely(status & (ATA_ERR | ATA_DF))) { + /* Some ATAPI tape drives forget to clear the ERR bit + * when doing the next command (mostly request sense). + * We ignore ERR here to workaround and proceed sending + * the CDB. + */ + if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { + ata_ehi_push_desc(ehi, "ST_FIRST: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + } + + /* Send the CDB (atapi) or the first data block (ata pio out). 
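[Editor's note] The HSM_ST_FIRST case above boils down to a three-way triage of the status byte: DRQ set means the device wants the CDB or first data block, DRQ clear with ERR/DF means the device aborted, and DRQ clear without them is a host-state-machine violation for EH. A standalone sketch of that triage; the MY_* constants mirror the libata status bits:

enum first_phase { PROCEED, DEV_ERROR, HSM_VIOLATION };

#define MY_ATA_DRQ 0x08u  /* matches ATA_DRQ */
#define MY_ATA_ERR 0x01u  /* matches ATA_ERR */
#define MY_ATA_DF  0x20u  /* matches ATA_DF  */

static enum first_phase triage_first(unsigned int status)
{
    if (status & MY_ATA_DRQ)
        return PROCEED;                   /* device wants the data/CDB */
    if (status & (MY_ATA_ERR | MY_ATA_DF))
        return DEV_ERROR;                 /* device aborted the command */
    return HSM_VIOLATION;                 /* BSY=0, DRQ=0: protocol bug */
}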
+ * During the state transition, interrupt handler shouldn't + * be invoked before the data transfer is complete and + * hsm_task_state is changed. Hence, the following locking. + */ + if (in_wq) + spin_lock_irqsave(ap->lock, flags); + + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. + */ + + /* ata_pio_sectors() might change the state + * to HSM_ST_LAST. so, the state is changed here + * before ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST; + ata_pio_sectors(qc); + } else + /* send CDB */ + atapi_send_cdb(ap, qc); + + if (in_wq) + spin_unlock_irqrestore(ap->lock, flags); + + /* if polling, ata_sff_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. + */ + break; + + case HSM_ST: + /* complete command or read/write the data register */ + if (qc->tf.protocol == ATAPI_PROT_PIO) { + /* ATAPI PIO protocol */ + if ((status & ATA_DRQ) == 0) { + /* No more data to transfer or device error. + * Device error will be tagged in HSM_ST_LAST. + */ + ap->hsm_task_state = HSM_ST_LAST; + goto fsm_start; + } + + /* Device should not ask for data transfer (DRQ=1) + * when it finds something wrong. + * We ignore DRQ here and stop the HSM by + * changing hsm_task_state to HSM_ST_ERR and + * let the EH abort the command or reset the device. + */ + if (unlikely(status & (ATA_ERR | ATA_DF))) { + ata_ehi_push_desc(ehi, "ST-ATAPI: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + atapi_pio_bytes(qc); + + if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) + /* bad ireason reported by device */ + goto fsm_start; + + } else { + /* ATA PIO protocol */ + if (unlikely((status & ATA_DRQ) == 0)) { + /* handle BSY=0, DRQ=0 as error */ + if (likely(status & (ATA_ERR | ATA_DF))) { + /* device stops HSM for abort/error */ + qc->err_mask |= AC_ERR_DEV; + + /* If diagnostic failed and this is + * IDENTIFY, it's likely a phantom + * device. Mark hint. + */ + if (qc->dev->horkage & + ATA_HORKAGE_DIAGNOSTIC) + qc->err_mask |= + AC_ERR_NODEV_HINT; + } else { + /* HSM violation. Let EH handle this. + * Phantom devices also trigger this + * condition. Mark hint. + */ + ata_ehi_push_desc(ehi, "ST-ATA: " + "DRQ=0 without device error, " + "dev_stat 0x%X", status); + qc->err_mask |= AC_ERR_HSM | + AC_ERR_NODEV_HINT; + } + + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* For PIO reads, some devices may ask for + * data transfer (DRQ=1) along with ERR=1. + * We respect DRQ here and transfer one + * block of junk data before changing the + * hsm_task_state to HSM_ST_ERR. + * + * For PIO writes, ERR=1 DRQ=1 doesn't make + * sense since the data block has been + * transferred to the device. + */ + if (unlikely(status & (ATA_ERR | ATA_DF))) { + /* data might be corrupted */ + qc->err_mask |= AC_ERR_DEV; + + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { + ata_pio_sectors(qc); + status = ata_wait_idle(ap); + } + + if (status & (ATA_BUSY | ATA_DRQ)) { + ata_ehi_push_desc(ehi, "ST-ATA: " + "BUSY|DRQ persists on ERR|DF, " + "dev_stat 0x%X", status); + qc->err_mask |= AC_ERR_HSM; + } + + /* There are oddball controllers with + * status register stuck at 0x7f and + * lbal/m/h at zero which makes it + * pass all other presence detection + * mechanisms we have. Set NODEV_HINT + * for it. Kernel bz#7241. + */ + if (status == 0x7f) + qc->err_mask |= AC_ERR_NODEV_HINT; + + /* ata_pio_sectors() might change the + * state to HSM_ST_LAST.
so, the state + * is changed after ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + ata_pio_sectors(qc); + + if (ap->hsm_task_state == HSM_ST_LAST && + (!(qc->tf.flags & ATA_TFLAG_WRITE))) { + /* all data read */ + status = ata_wait_idle(ap); + goto fsm_start; + } + } + + poll_next = 1; + break; + + case HSM_ST_LAST: + if (unlikely(!ata_ok(status))) { + qc->err_mask |= __ac_err_mask(status); + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* no more data to transfer */ + DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", + ap->print_id, qc->dev->devno, status); + + WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); + + ap->hsm_task_state = HSM_ST_IDLE; + + /* complete taskfile transaction */ + ata_hsm_qc_complete(qc, in_wq); + + poll_next = 0; + break; + + case HSM_ST_ERR: + ap->hsm_task_state = HSM_ST_IDLE; + + /* complete taskfile transaction */ + ata_hsm_qc_complete(qc, in_wq); + + poll_next = 0; + break; + default: + poll_next = 0; + BUG(); + } + + return poll_next; +} +EXPORT_SYMBOL_GPL(ata_sff_hsm_move); + +void ata_sff_queue_work(struct work_struct *work) +{ + queue_work(ata_sff_wq, work); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_work); + +void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay) +{ + queue_delayed_work(ata_sff_wq, dwork, delay); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work); + +void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) +{ + struct ata_port *ap = link->ap; + + WARN_ON((ap->sff_pio_task_link != NULL) && + (ap->sff_pio_task_link != link)); + ap->sff_pio_task_link = link; + + /* may fail if ata_sff_flush_pio_task() in progress */ + ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay)); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); + +void ata_sff_flush_pio_task(struct ata_port *ap) +{ + DPRINTK("ENTER\n"); + + cancel_delayed_work_sync(&ap->sff_pio_task); + ap->hsm_task_state = HSM_ST_IDLE; + ap->sff_pio_task_link = NULL; + + if (ata_msg_ctl(ap)) + ata_port_dbg(ap, "%s: EXIT\n", __func__); +} + +static void ata_sff_pio_task(struct work_struct *work) +{ + struct ata_port *ap = + container_of(work, struct ata_port, sff_pio_task.work); + struct ata_link *link = ap->sff_pio_task_link; + struct ata_queued_cmd *qc; + u8 status; + int poll_next; + + BUG_ON(ap->sff_pio_task_link == NULL); + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; + return; + } + +fsm_start: + WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); + + /* + * This is purely heuristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, queue delayed work. + */ + status = ata_sff_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + ata_msleep(ap, 2); + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); + return; + } + } + + /* + * hsm_move() may trigger another command to be processed. + * clean the link beforehand. + */ + ap->sff_pio_task_link = NULL; + /* move the HSM */ + poll_next = ata_sff_hsm_move(ap, qc, status, 1); + + /* another command or interrupt handler + * may be running at this point. + */ + if (poll_next) + goto fsm_start; } /** - * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO) - * @qc: Info associated with this ATA transaction. 
+ * ata_sff_qc_issue - issue taskfile to a SFF controller + * @qc: command to issue to device + * + * This function issues a PIO or NODATA command to a SFF + * controller. * * LOCKING: * spin_lock_irqsave(host lock) + * + * RETURNS: + * Zero on success, AC_ERR_* mask on failure */ - -static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) +unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; + struct ata_link *link = qc->dev->link; - /* load PRD table addr. */ - outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + /* Use polling pio if the LLD doesn't handle + * interrupt driven pio and atapi CDB interrupt. + */ + if (ap->flags & ATA_FLAG_PIO_POLLING) + qc->tf.flags |= ATA_TFLAG_POLLING; - /* specify data direction, triple-check start bit is clear */ - dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + /* select the device */ + ata_dev_select(ap, qc->dev->devno, 1, 0); - /* issue r/w command */ - ap->ops->exec_command(ap, &qc->tf); + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + + ata_tf_to_host(ap, &qc->tf); + ap->hsm_task_state = HSM_ST_LAST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_sff_queue_pio_task(link, 0); + + break; + + case ATA_PROT_PIO: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + + ata_tf_to_host(ap, &qc->tf); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + /* PIO data out protocol */ + ap->hsm_task_state = HSM_ST_FIRST; + ata_sff_queue_pio_task(link, 0); + + /* always send first data block using the + * ata_sff_pio_task() codepath. + */ + } else { + /* PIO data in protocol */ + ap->hsm_task_state = HSM_ST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_sff_queue_pio_task(link, 0); + + /* if polling, ata_sff_pio_task() handles the + * rest. otherwise, interrupt handler takes + * over from here. + */ + } + + break; + + case ATAPI_PROT_PIO: + case ATAPI_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + + ata_tf_to_host(ap, &qc->tf); + + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || + (qc->tf.flags & ATA_TFLAG_POLLING)) + ata_sff_queue_pio_task(link, 0); + break; + + default: + WARN_ON_ONCE(1); + return AC_ERR_SYSTEM; + } + + return 0; } +EXPORT_SYMBOL_GPL(ata_sff_qc_issue); /** - * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO) - * @qc: Info associated with this ATA transaction. + * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read + * @qc: qc to fill result TF for + * + * @qc is finished and result TF needs to be filled. Fill it + * using ->sff_tf_read. * * LOCKING: * spin_lock_irqsave(host lock) + * + * RETURNS: + * true indicating that result TF is successfully filled. 
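+ * + * As a rough usage sketch (the my_* names below are made up), LLDs normally pick up this helper and ata_sff_qc_issue() together by inheriting the stock SFF port operations rather than wiring them up by hand: + * + * static struct ata_port_operations my_pata_ops = { + * .inherits = &ata_sff_port_ops, + * .cable_detect = ata_cable_40wire, + * .set_piomode = my_set_piomode, + * };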
*/ - -static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) +bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; - u8 dmactl; + qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); + return true; +} +EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); - /* start host DMA transaction */ - dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - outb(dmactl | ATA_DMA_START, - ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +static unsigned int ata_sff_idle_irq(struct ata_port *ap) +{ + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_port_warn(ap, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ } +static unsigned int __ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, + bool hsmv_on_idle) +{ + u8 status; + + VPRINTK("ata%u: protocol %d task_state %d\n", + ap->print_id, qc->tf.protocol, ap->hsm_task_state); + + /* Check whether we are expecting interrupt in this state */ + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Some pre-ATAPI-4 devices assert INTRQ + * at this state when ready to receive CDB. + */ + + /* Check the ATA_DFLAG_CDB_INTR flag is enough here. + * The flag was turned on only for atapi devices. No + * need to check ata_is_atapi(qc->tf.protocol) again. + */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + return ata_sff_idle_irq(ap); + break; + case HSM_ST_IDLE: + return ata_sff_idle_irq(ap); + default: + break; + } + + /* check main status, clearing INTRQ if needed */ + status = ata_sff_irq_status(ap); + if (status & ATA_BUSY) { + if (hsmv_on_idle) { + /* BMDMA engine is already stopped, we're screwed */ + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + } else + return ata_sff_idle_irq(ap); + } + + /* clear irq events */ + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + + ata_sff_hsm_move(ap, qc, status, 0); + + return 1; /* irq handled */ +} /** - * ata_bmdma_start - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * Writes the ATA_DMA_START flag to the DMA command register. + * ata_sff_port_intr - Handle SFF port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine * - * May be used as the bmdma_start() entry in ata_port_operations. + * Handle port interrupt for given queued command. * * LOCKING: * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). 
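+ * + * For instance, a controller with a dedicated IRQ line per port could call this helper straight from its own handler. A minimal sketch under that assumption (my_port_irq is a made-up name): + * + * static irqreturn_t my_port_irq(int irq, void *dev_instance) + * { + * struct ata_port *ap = dev_instance; + * struct ata_queued_cmd *qc; + * unsigned int handled = 0; + * unsigned long flags; + * + * spin_lock_irqsave(&ap->host->lock, flags); + * qc = ata_qc_from_tag(ap, ap->link.active_tag); + * if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) + * handled = ata_sff_port_intr(ap, qc); + * spin_unlock_irqrestore(&ap->host->lock, flags); + * + * return IRQ_RETVAL(handled); + * }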
*/ -void ata_bmdma_start(struct ata_queued_cmd *qc) +unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) { - if (qc->ap->flags & ATA_FLAG_MMIO) - ata_bmdma_start_mmio(qc); - else - ata_bmdma_start_pio(qc); + return __ata_sff_port_intr(ap, qc, false); } +EXPORT_SYMBOL_GPL(ata_sff_port_intr); +static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, + unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) +{ + struct ata_host *host = dev_instance; + bool retried = false; + unsigned int i; + unsigned int handled, idle, polling; + unsigned long flags; + + /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ + spin_lock_irqsave(&host->lock, flags); + +retry: + handled = idle = polling = 0; + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc) { + if (!(qc->tf.flags & ATA_TFLAG_POLLING)) + handled |= port_intr(ap, qc); + else + polling |= 1 << i; + } else + idle |= 1 << i; + } + + /* + * If no port was expecting an IRQ but the controller is actually + * asserting the IRQ line, an "irq: nobody cared" splat will ensue. + * Check IRQ pending status if available and clear spurious IRQ. + */ + if (!handled && !retried) { + bool retry = false; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + if (polling & (1 << i)) + continue; + + if (!ap->ops->sff_irq_check || + !ap->ops->sff_irq_check(ap)) + continue; + + if (idle & (1 << i)) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + } else { + /* clear INTRQ and check if BUSY cleared */ + if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) + retry |= true; + /* + * With command in flight, we can't do + * sff_irq_clear() w/o racing with completion. + */ + } + } + + if (retry) { + retried = true; + goto retry; + } + } + + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_RETVAL(handled); +} /** - * ata_bmdma_setup - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. + * ata_sff_interrupt - Default SFF ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure * - * Writes address of PRD table to device's PRD Table Address - * register, sets the DMA control register, and calls - * ops->exec_command() to start the transfer. - * - * May be used as the bmdma_setup() entry in ata_port_operations. + * Default interrupt handler for PCI IDE devices. Calls + * ata_sff_port_intr() for each port that is not disabled. * * LOCKING: - * spin_lock_irqsave(host lock) + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. */ -void ata_bmdma_setup(struct ata_queued_cmd *qc) +irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) { - if (qc->ap->flags & ATA_FLAG_MMIO) - ata_bmdma_setup_mmio(qc); - else - ata_bmdma_setup_pio(qc); + return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); } +EXPORT_SYMBOL_GPL(ata_sff_interrupt); +/** + * ata_sff_lost_interrupt - Check for an apparent lost interrupt + * @ap: port that appears to have timed out + * + * Called from the libata error handlers when the core code suspects + * an interrupt has been lost. If it has, complete anything we can and + * then return. Interface must support altstatus for this faster + * recovery to occur.
+ * + * Locking: + * Caller holds host lock + */ + +void ata_sff_lost_interrupt(struct ata_port *ap) +{ + u8 status; + struct ata_queued_cmd *qc; + + /* Only one outstanding command per SFF channel */ + qc = ata_qc_from_tag(ap, ap->link.active_tag); + /* We cannot lose an interrupt on a non-existent or polled command */ + if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) + return; + /* See if the controller thinks it is still busy - if so the command + isn't a lost IRQ but is still in progress */ + status = ata_sff_altstatus(ap); + if (status & ATA_BUSY) + return; + + /* There was a command running, we are no longer busy and we have + no interrupt. */ + ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", + status); + /* Run the host interrupt logic as if the interrupt had not been + lost */ + ata_sff_port_intr(ap, qc); +} +EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); /** - * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. + * ata_sff_freeze - Freeze SFF controller port + * @ap: port to freeze * - * Clear interrupt and error flags in DMA status register. + * Freeze SFF controller port. * - * May be used as the irq_clear() entry in ata_port_operations. + * LOCKING: + * Inherited from caller. + */ +void ata_sff_freeze(struct ata_port *ap) +{ + ap->ctl |= ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); + + /* Under certain circumstances, some controllers raise IRQ on + * ATA_NIEN manipulation. Also, many controllers fail to mask + * previously pending IRQ on ATA_NIEN assertion. Clear it. + */ + ap->ops->sff_check_status(ap); + + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); +} +EXPORT_SYMBOL_GPL(ata_sff_freeze); + +/** + * ata_sff_thaw - Thaw SFF controller port + * @ap: port to thaw + * + * Thaw SFF controller port. * * LOCKING: - * spin_lock_irqsave(host lock) + * Inherited from caller. */ +void ata_sff_thaw(struct ata_port *ap) +{ + /* clear & re-enable interrupts */ + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_sff_irq_on(ap); +} +EXPORT_SYMBOL_GPL(ata_sff_thaw); -void ata_bmdma_irq_clear(struct ata_port *ap) +/** + * ata_sff_prereset - prepare SFF link for reset + * @link: SFF link to be reset + * @deadline: deadline jiffies for the operation + * + * SFF link @link is about to be reset. Initialize it. It first + * calls ata_std_prereset() and waits for !BSY if the port is + * being softreset. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise.
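+ * + * LLDs that need controller-specific setup before reset usually wrap this helper rather than replace it; a sketch under that assumption (my_prereset and my_phy_quirk are made-up names): + * + * static int my_prereset(struct ata_link *link, unsigned long deadline) + * { + * my_phy_quirk(link->ap); + * return ata_sff_prereset(link, deadline); + * }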
+ */ +int ata_sff_prereset(struct ata_link *link, unsigned long deadline) { - if (!ap->ioaddr.bmdma_addr) - return; + struct ata_eh_context *ehc = &link->eh_context; + int rc; - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = - ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; - writeb(readb(mmio), mmio); - } else { - unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; - outb(inb(addr), addr); + rc = ata_std_prereset(link, deadline); + if (rc) + return rc; + + /* if we're about to do hardreset, nothing more to do */ + if (ehc->i.action & ATA_EH_HARDRESET) + return 0; + + /* wait for !BSY if we don't know that no device is attached */ + if (!ata_link_offline(link)) { + rc = ata_sff_wait_ready(link, deadline); + if (rc && rc != -ENODEV) { + ata_link_warn(link, + "device not ready (errno=%d), forcing hardreset\n", + rc); + ehc->i.action |= ATA_EH_HARDRESET; + } } + + return 0; } +EXPORT_SYMBOL_GPL(ata_sff_prereset); +/** + * ata_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * This technique was originally described in + * Hale Landis's ATADRVR (www.ata-atapi.com), and + * later found its way into the ATA/ATAPI spec. + * + * Write a pattern to the ATA shadow registers, + * and if a device is present, it will respond by + * correctly storing and echoing back the + * ATA shadow register contents. + * + * LOCKING: + * caller. + */ +static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + ap->ops->sff_dev_select(ap, device); + + iowrite8(0x55, ioaddr->nsect_addr); + iowrite8(0xaa, ioaddr->lbal_addr); + + iowrite8(0xaa, ioaddr->nsect_addr); + iowrite8(0x55, ioaddr->lbal_addr); + + iowrite8(0x55, ioaddr->nsect_addr); + iowrite8(0xaa, ioaddr->lbal_addr); + + nsect = ioread8(ioaddr->nsect_addr); + lbal = ioread8(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} /** - * ata_bmdma_status - Read PCI IDE BMDMA status - * @ap: Port associated with this ATA transaction. + * ata_sff_dev_classify - Parse returned ATA device signature + * @dev: ATA device to classify (starting at zero) + * @present: device seems present + * @r_err: Value of error register on completion * - * Read and return BMDMA status register. + * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, + * an ATA/ATAPI-defined set of values is placed in the ATA + * shadow registers, indicating the results of device detection + * and diagnostics. * - * May be used as the bmdma_status() entry in ata_port_operations. + * Select the ATA device, and read the values from the ATA shadow + * registers. Then parse according to the Error register value, + * and the spec-defined values examined by ata_dev_classify(). * * LOCKING: - * spin_lock_irqsave(host lock) + * caller. + * + * RETURNS: + * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 
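+ * + * For reference, ata_dev_classify() keys on the reset signature the + * device leaves in the shadow registers; the spec-defined lbam/lbah + * pairs are: + * + * lbam lbah class + * 0x00 0x00 ATA_DEV_ATA + * 0x14 0xeb ATA_DEV_ATAPI + * 0x69 0x96 ATA_DEV_PMP (SATA port multiplier) + * 0x3c 0xc3 ATA_DEV_SEMB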
*/ +unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, + u8 *r_err) +{ + struct ata_port *ap = dev->link->ap; + struct ata_taskfile tf; + unsigned int class; + u8 err; + + ap->ops->sff_dev_select(ap, dev->devno); + + memset(&tf, 0, sizeof(tf)); + + ap->ops->sff_tf_read(ap, &tf); + err = tf.feature; + if (r_err) + *r_err = err; + + /* see if device passed diags: continue and warn later */ + if (err == 0) + /* diagnostic fail : do nothing _YET_ */ + dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; + else if (err == 1) + /* do nothing */ ; + else if ((dev->devno == 0) && (err == 0x81)) + /* do nothing */ ; + else + return ATA_DEV_NONE; -u8 ata_bmdma_status(struct ata_port *ap) + /* determine if device is ATA or ATAPI */ + class = ata_dev_classify(&tf); + + if (class == ATA_DEV_UNKNOWN) { + /* If the device failed diagnostic, it's likely to + * have reported incorrect device signature too. + * Assume ATA device if the device seems present but + * device signature is invalid with diagnostic + * failure. + */ + if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) + class = ATA_DEV_ATA; + else + class = ATA_DEV_NONE; + } else if ((class == ATA_DEV_ATA) && + (ap->ops->sff_check_status(ap) == 0)) + class = ATA_DEV_NONE; + + return class; +} +EXPORT_SYMBOL_GPL(ata_sff_dev_classify); + +/** + * ata_sff_wait_after_reset - wait for devices to become ready after reset + * @link: SFF link which is just reset + * @devmask: mask of present devices + * @deadline: deadline jiffies for the operation + * + * Wait for devices attached to SFF @link to become ready after + * reset. It includes a preceding 150ms wait to avoid accessing the + * TF status register too early. + * + * LOCKING: + * Kernel thread context (may sleep). + * + * RETURNS: + * 0 on success, -ENODEV if some or all of devices in @devmask + * don't seem to exist. -errno on other errors. + */ +int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, + unsigned long deadline) { - u8 host_stat; - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - host_stat = readb(mmio + ATA_DMA_STATUS); - } else - host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); - return host_stat; + struct ata_port *ap = link->ap; + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + int rc, ret = 0; + + ata_msleep(ap, ATA_WAIT_AFTER_RESET); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + /* -ENODEV means the odd clown forgot the D7 pulldown resistor + * and TF status is 0xff, bail out on it too. + */ + if (rc) + return rc; + + /* if device 1 was found in ata_devchk, wait for register + * access briefly, then wait for BSY to clear. + */ + if (dev1) { + int i; + + ap->ops->sff_dev_select(ap, 1); + + /* Wait for register access. Some ATAPI devices fail + * to set nsect/lbal after reset, so don't waste too + * much time on it. We're gonna wait for !BSY anyway. + */ + for (i = 0; i < 2; i++) { + u8 nsect, lbal; + + nsect = ioread8(ioaddr->nsect_addr); + lbal = ioread8(ioaddr->lbal_addr); + if ((nsect == 1) && (lbal == 1)) + break; + ata_msleep(ap, 50); /* give drive a breather */ + } + + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + if (rc != -ENODEV) + return rc; + ret = rc; + } + } + + /* is all this really necessary?
*/ + ap->ops->sff_dev_select(ap, 0); + if (dev1) + ap->ops->sff_dev_select(ap, 1); + if (dev0) + ap->ops->sff_dev_select(ap, 0); + + return ret; } +EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); +static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, + unsigned long deadline) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); + + /* software reset. causes dev0 to be selected */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + + /* wait for the port to become ready */ + return ata_sff_wait_after_reset(&ap->link, devmask, deadline); +} /** - * ata_bmdma_stop - Stop PCI IDE BMDMA transfer - * @qc: Command we are ending DMA for + * ata_sff_softreset - reset host port via ATA SRST + * @link: ATA link to reset + * @classes: resulting classes of attached devices + * @deadline: deadline jiffies for the operation * - * Clears the ATA_DMA_START flag in the dma control register - * - * May be used as the bmdma_stop() entry in ata_port_operations. + * Reset host port using ATA SRST. * * LOCKING: - * spin_lock_irqsave(host lock) + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. */ - -void ata_bmdma_stop(struct ata_queued_cmd *qc) +int ata_sff_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) { - struct ata_port *ap = qc->ap; - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; + struct ata_port *ap = link->ap; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + unsigned int devmask = 0; + int rc; + u8 err; - /* clear start/stop bit */ - writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, - mmio + ATA_DMA_CMD); - } else { - /* clear start/stop bit */ - outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, - ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + DPRINTK("ENTER\n"); + + /* determine if device 0/1 are present */ + if (ata_devchk(ap, 0)) + devmask |= (1 << 0); + if (slave_possible && ata_devchk(ap, 1)) + devmask |= (1 << 1); + + /* select device 0 again */ + ap->ops->sff_dev_select(ap, 0); + + /* issue bus reset */ + DPRINTK("about to softreset, devmask=%x\n", devmask); + rc = ata_bus_softreset(ap, devmask, deadline); + /* if link is occupied, -ENODEV too is an error */ + if (rc && (rc != -ENODEV || sata_scr_valid(link))) { + ata_link_err(link, "SRST failed (errno=%d)\n", rc); + return rc; } - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_altstatus(ap); /* dummy read */ + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&link->device[0], + devmask & (1 << 0), &err); + if (slave_possible && err != 0x81) + classes[1] = ata_sff_dev_classify(&link->device[1], + devmask & (1 << 1), &err); + + DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); + return 0; } +EXPORT_SYMBOL_GPL(ata_sff_softreset); /** - * ata_bmdma_freeze - Freeze BMDMA controller port - * @ap: port to freeze + * sata_sff_hardreset - reset host port via SATA phy reset + * @link: link to reset + * @class: resulting class of attached device + * @deadline: deadline jiffies for the operation * - * Freeze BMDMA controller port. + * SATA phy-reset host port using DET bits of SControl register, + * wait for !BSY and classify the attached device. * * LOCKING: - * Inherited from caller.
+ * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. */ -void ata_bmdma_freeze(struct ata_port *ap) +int sata_sff_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) { - struct ata_ioports *ioaddr = &ap->ioaddr; + struct ata_eh_context *ehc = &link->eh_context; + const unsigned long *timing = sata_ehc_deb_timing(ehc); + bool online; + int rc; - ap->ctl |= ATA_NIEN; - ap->last_ctl = ap->ctl; + rc = sata_link_hardreset(link, timing, deadline, &online, + ata_sff_check_ready); + if (online) + *class = ata_sff_dev_classify(link->device, 1, NULL); - if (ap->flags & ATA_FLAG_MMIO) - writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr); - else - outb(ap->ctl, ioaddr->ctl_addr); + DPRINTK("EXIT, class=%u\n", *class); + return rc; } +EXPORT_SYMBOL_GPL(sata_sff_hardreset); /** - * ata_bmdma_thaw - Thaw BMDMA controller port - * @ap: port to thaw + * ata_sff_postreset - SFF postreset callback + * @link: the target SFF ata_link + * @classes: classes of attached devices * - * Thaw BMDMA controller port. + * This function is invoked after a successful reset. It first + * calls ata_std_postreset() and performs SFF specific postreset + * processing. * * LOCKING: - * Inherited from caller. + * Kernel thread context (may sleep) */ -void ata_bmdma_thaw(struct ata_port *ap) +void ata_sff_postreset(struct ata_link *link, unsigned int *classes) { - /* clear & re-enable interrupts */ - ata_chk_status(ap); - ap->ops->irq_clear(ap); - if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ - ata_irq_on(ap); + struct ata_port *ap = link->ap; + + ata_std_postreset(link, classes); + + /* is double-select really necessary? */ + if (classes[0] != ATA_DEV_NONE) + ap->ops->sff_dev_select(ap, 1); + if (classes[1] != ATA_DEV_NONE) + ap->ops->sff_dev_select(ap, 0); + + /* bail out if no device is present */ + if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { + DPRINTK("EXIT, no device\n"); + return; + } + + /* set up device control */ + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { + ata_sff_set_devctl(ap, ap->ctl); + ap->last_ctl = ap->ctl; + } } +EXPORT_SYMBOL_GPL(ata_sff_postreset); /** - * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller + * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers + * @qc: command + * + * Drain the FIFO and device of any stuck data following a command + * failing to complete. In some cases this is necessary before a + * reset will recover the device. + * + */ + +void ata_sff_drain_fifo(struct ata_queued_cmd *qc) +{ + int count; + struct ata_port *ap; + + /* We only need to flush incoming data when a command was running */ + if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) + return; + + ap = qc->ap; + /* Drain up to 64K of data before we give up this recovery method */ + for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) + && count < 65536; count += 2) + ioread16(ap->ioaddr.data_addr); + + /* Can become DEBUG later */ + if (count) + ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); + +} +EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); + +/** + * ata_sff_error_handler - Stock error handler for SFF controller * @ap: port to handle error for - * @prereset: prereset method (can be NULL) - * @softreset: softreset method (can be NULL) - * @hardreset: hardreset method (can be NULL) - * @postreset: postreset method (can be NULL) * - * Handle error for ATA BMDMA controller. It can handle both + * Stock error handler for SFF controller. 
It can handle both * PATA and SATA controllers. Many controllers should be able to * use this EH as-is or with some added handling before and * after. * - * This function is intended to be used for constructing - * ->error_handler callback by low level drivers. - * * LOCKING: * Kernel thread context (may sleep) */ -void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, - ata_reset_fn_t softreset, ata_reset_fn_t hardreset, - ata_postreset_fn_t postreset) +void ata_sff_error_handler(struct ata_port *ap) { - struct ata_eh_context *ehc = &ap->eh_context; + ata_reset_fn_t softreset = ap->ops->softreset; + ata_reset_fn_t hardreset = ap->ops->hardreset; struct ata_queued_cmd *qc; unsigned long flags; - int thaw = 0; - qc = __ata_qc_from_tag(ap, ap->active_tag); + qc = __ata_qc_from_tag(ap, ap->link.active_tag); if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) qc = NULL; - /* reset PIO HSM and stop DMA engine */ spin_lock_irqsave(ap->lock, flags); - ap->hsm_task_state = HSM_ST_IDLE; + /* + * We *MUST* do FIFO draining before we issue a reset as + * several devices helpfully clear their internal state and + * will lock solid if we touch the data port post reset. Pass + * qc in case anyone wants to do different PIO/DMA recovery or + * has per command fixups + */ + if (ap->ops->sff_drain_fifo) + ap->ops->sff_drain_fifo(qc); - if (qc && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATA_PROT_ATAPI_DMA)) { - u8 host_stat; + spin_unlock_irqrestore(ap->lock, flags); - host_stat = ata_bmdma_status(ap); + /* ignore ata_sff_softreset if ctl isn't accessible */ + if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) + softreset = NULL; - ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat); + /* ignore built-in hardresets if SCR access is not available */ + if ((hardreset == sata_std_hardreset || + hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) + hardreset = NULL; - /* BMDMA controllers indicate host bus error by - * setting DMA_ERR bit and timing out. As it wasn't - * really a timeout event, adjust error mask and - * cancel frozen state. + ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, + ap->ops->postreset); +} +EXPORT_SYMBOL_GPL(ata_sff_error_handler); + +/** + * ata_sff_std_ports - initialize ioaddr with standard port offsets. + * @ioaddr: IO address structure to be initialized + * + * Utility function which initializes data_addr, error_addr, + * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, + * device_addr, status_addr, and command_addr to standard offsets + * relative to cmd_addr. + * + * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 
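+ * + * A platform LLD typically points cmd_addr at its ioremapped taskfile window and lets this helper derive the rest; a sketch (mmio_base and MY_CTL_OFS are made-up names): + * + * ap->ioaddr.cmd_addr = mmio_base; + * ap->ioaddr.ctl_addr = mmio_base + MY_CTL_OFS; + * ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + * ata_sff_std_ports(&ap->ioaddr);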
+ */ +void ata_sff_std_ports(struct ata_ioports *ioaddr) +{ + ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; + ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; + ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; + ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; + ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; + ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; + ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; + ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; + ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; + ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; +} +EXPORT_SYMBOL_GPL(ata_sff_std_ports); + +#ifdef CONFIG_PCI + +static int ata_resources_present(struct pci_dev *pdev, int port) +{ + int i; + + /* Check the PCI resources for this channel are enabled */ + port = port * 2; + for (i = 0; i < 2; i++) { + if (pci_resource_start(pdev, port + i) == 0 || + pci_resource_len(pdev, port + i) == 0) + return 0; + } + return 1; +} + +/** + * ata_pci_sff_init_host - acquire native PCI ATA resources and init host + * @host: target ATA host + * + * Acquire native PCI ATA resources for @host and initialize the + * first two ports of @host accordingly. Ports marked dummy are + * skipped and allocation failure makes the port dummy. + * + * Note that native PCI resources are valid even for legacy hosts + * as we fix up pdev resources array early in boot, so this + * function can be used for both native and legacy SFF hosts. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 if at least one port is initialized, -ENODEV if no port is + * available. + */ +int ata_pci_sff_init_host(struct ata_host *host) +{ + struct device *gdev = host->dev; + struct pci_dev *pdev = to_pci_dev(gdev); + unsigned int mask = 0; + int i, rc; + + /* request, iomap BARs and init port addresses accordingly */ + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; + int base = i * 2; + void __iomem * const *iomap; + + if (ata_port_is_dummy(ap)) + continue; + + /* Discard disabled ports. Some controllers show + * their unused channels this way. Disabled ports are + * made dummy. 
*/ - if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) { - qc->err_mask = AC_ERR_HOST_BUS; - thaw = 1; + if (!ata_resources_present(pdev, i)) { + ap->ops = &ata_dummy_port_ops; + continue; } - ap->ops->bmdma_stop(qc); - } + rc = pcim_iomap_regions(pdev, 0x3 << base, + dev_driver_string(gdev)); + if (rc) { + dev_warn(gdev, + "failed to request/iomap BARs for port %d (errno=%d)\n", + i, rc); + if (rc == -EBUSY) + pcim_pin_device(pdev); + ap->ops = &ata_dummy_port_ops; + continue; + } + host->iomap = iomap = pcim_iomap_table(pdev); - ata_altstatus(ap); - ata_chk_status(ap); - ap->ops->irq_clear(ap); + ap->ioaddr.cmd_addr = iomap[base]; + ap->ioaddr.altstatus_addr = + ap->ioaddr.ctl_addr = (void __iomem *) + ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); + ata_sff_std_ports(&ap->ioaddr); - spin_unlock_irqrestore(ap->lock, flags); + ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", + (unsigned long long)pci_resource_start(pdev, base), + (unsigned long long)pci_resource_start(pdev, base + 1)); - if (thaw) - ata_eh_thaw_port(ap); + mask |= 1 << i; + } - /* PIO and DMA engines have been stopped, perform recovery */ - ata_do_eh(ap, prereset, softreset, hardreset, postreset); + if (!mask) { + dev_err(gdev, "no available native port\n"); + return -ENODEV; + } + + return 0; } +EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** - * ata_bmdma_error_handler - Stock error handler for BMDMA controller - * @ap: port to handle error for + * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host * - * Stock error handler for BMDMA controller. + * Helper to allocate PIO-only SFF ATA host for @pdev, acquire + * all PCI resources and initialize it accordingly in one go. * * LOCKING: - * Kernel thread context (may sleep) + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. */ -void ata_bmdma_error_handler(struct ata_port *ap) +int ata_pci_sff_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct ata_host **r_host) { - ata_reset_fn_t hardreset; + struct ata_host *host; + int rc; + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) { + dev_err(&pdev->dev, "failed to allocate ATA host\n"); + rc = -ENOMEM; + goto err_out; + } - hardreset = NULL; - if (sata_scr_valid(ap)) - hardreset = sata_std_hardreset; + rc = ata_pci_sff_init_host(host); + if (rc) + goto err_out; - ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, - ata_std_postreset); + devres_remove_group(&pdev->dev, NULL); + *r_host = host; + return 0; + +err_out: + devres_release_group(&pdev->dev, NULL); + return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); /** - * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for - * BMDMA controller - * @qc: internal command to clean up + * ata_pci_sff_activate_host - start SFF host, request IRQ and register it + * @host: target SFF ATA host + * @irq_handler: irq_handler used when requesting IRQ(s) + * @sht: scsi_host_template to use when registering the host + * + * This is the counterpart of ata_host_activate() for SFF ATA + * hosts. This separate helper is necessary because SFF hosts + * use two separate interrupts in legacy mode. * * LOCKING: - * Kernel thread context (may sleep) + * Inherited from calling layer (may sleep). 
+ * + * RETURNS: + * 0 on success, -errno otherwise. */ -void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) +int ata_pci_sff_activate_host(struct ata_host *host, + irq_handler_t irq_handler, + struct scsi_host_template *sht) { - ata_bmdma_stop(qc); -} + struct device *dev = host->dev; + struct pci_dev *pdev = to_pci_dev(dev); + const char *drv_name = dev_driver_string(host->dev); + int legacy_mode = 0, rc; -#ifdef CONFIG_PCI -/** - * ata_pci_init_native_mode - Initialize native-mode driver - * @pdev: pci device to be initialized - * @port: array[2] of pointers to port info structures. - * @ports: bitmap of ports present - * - * Utility function which allocates and initializes an - * ata_probe_ent structure for a standard dual-port - * PIO-based IDE controller. The returned ata_probe_ent - * structure can be passed to ata_device_add(). The returned - * ata_probe_ent structure should then be freed with kfree(). - * - * The caller need only pass the address of the primary port, the - * secondary will be deduced automatically. If the device has non - * standard secondary port mappings this function can be called twice, - * once for each interface. - */ - -struct ata_probe_ent * -ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) -{ - struct ata_probe_ent *probe_ent = - ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); - int p = 0; - unsigned long bmdma; - - if (!probe_ent) - return NULL; - - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->private_data = port[0]->private_data; - - if (ports & ATA_PORT_PRIMARY) { - probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); - probe_ent->port[p].altstatus_addr = - probe_ent->port[p].ctl_addr = - pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; - bmdma = pci_resource_start(pdev, 4); - if (bmdma) { - if (inb(bmdma + 2) & 0x80) - probe_ent->_host_flags |= ATA_HOST_SIMPLEX; - probe_ent->port[p].bmdma_addr = bmdma; - } - ata_std_ports(&probe_ent->port[p]); - p++; + rc = ata_host_start(host); + if (rc) + return rc; + + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; } - if (ports & ATA_PORT_SECONDARY) { - probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); - probe_ent->port[p].altstatus_addr = - probe_ent->port[p].ctl_addr = - pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; - bmdma = pci_resource_start(pdev, 4); - if (bmdma) { - bmdma += 8; - if(inb(bmdma + 2) & 0x80) - probe_ent->_host_flags |= ATA_HOST_SIMPLEX; - probe_ent->port[p].bmdma_addr = bmdma; + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + if (!legacy_mode && pdev->irq) { + int i; + + rc = devm_request_irq(dev, pdev->irq, irq_handler, + IRQF_SHARED, drv_name, host); + if (rc) + goto out; + + for (i = 0; i < 2; i++) { + if (ata_port_is_dummy(host->ports[i])) + continue; + ata_port_desc(host->ports[i], "irq %d", pdev->irq); + } + } else if (legacy_mode) { + if (!ata_port_is_dummy(host->ports[0])) { + rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), + irq_handler, IRQF_SHARED, + drv_name, host); + if (rc) + goto out; + + ata_port_desc(host->ports[0], "irq %d", + ATA_PRIMARY_IRQ(pdev)); + } + + if (!ata_port_is_dummy(host->ports[1])) { + rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), + irq_handler, IRQF_SHARED, + drv_name, host); + if (rc) + goto out; + + ata_port_desc(host->ports[1], "irq %d", + ATA_SECONDARY_IRQ(pdev)); } - ata_std_ports(&probe_ent->port[p]); - probe_ent->pinfo2 = port[1]; - p++; } - probe_ent->n_ports = p; - return probe_ent; + rc = ata_host_register(host, sht); +out: + if (rc == 0) + devres_remove_group(dev, NULL); + else + devres_release_group(dev, NULL); + + return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); +static const struct ata_port_info *ata_sff_find_valid_pi( + const struct ata_port_info * const *ppi) +{ + int i; + + /* look up the first valid port_info */ + for (i = 0; i < 2 && ppi[i]; i++) + if (ppi[i]->port_ops != &ata_dummy_port_ops) + return ppi[i]; -static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, - struct ata_port_info **port, int port_mask) + return NULL; +} + +static int ata_pci_init_one(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags, bool bmdma) { - struct ata_probe_ent *probe_ent; - unsigned long bmdma = pci_resource_start(pdev, 4); + struct device *dev = &pdev->dev; + const struct ata_port_info *pi; + struct ata_host *host = NULL; + int rc; - probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); - if (!probe_ent) - return NULL; - - probe_ent->n_ports = 2; - probe_ent->private_data = port[0]->private_data; - - if (port_mask & ATA_PORT_PRIMARY) { - probe_ent->irq = ATA_PRIMARY_IRQ; - probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD; - probe_ent->port[0].altstatus_addr = - probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL; - if (bmdma) { - probe_ent->port[0].bmdma_addr = bmdma; - if (inb(bmdma + 2) & 0x80) - probe_ent->_host_flags |= ATA_HOST_SIMPLEX; - } - ata_std_ports(&probe_ent->port[0]); - } else - probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY; + DPRINTK("ENTER\n"); - if (port_mask & ATA_PORT_SECONDARY) { - if (probe_ent->irq) - probe_ent->irq2 = ATA_SECONDARY_IRQ; - else - probe_ent->irq = ATA_SECONDARY_IRQ; - probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD; - probe_ent->port[1].altstatus_addr = - probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL; - if (bmdma) { - probe_ent->port[1].bmdma_addr = bmdma + 8; - if (inb(bmdma + 10) & 0x80) - probe_ent->_host_flags |= 
ATA_HOST_SIMPLEX; - } - ata_std_ports(&probe_ent->port[1]); - probe_ent->pinfo2 = port[1]; + pi = ata_sff_find_valid_pi(ppi); + if (!pi) { + dev_err(&pdev->dev, "no valid port_info specified\n"); + return -EINVAL; + } + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + goto out; + +#ifdef CONFIG_ATA_BMDMA + if (bmdma) + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + else +#endif + /* prepare and activate SFF host */ + rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + if (rc) + goto out; + host->private_data = host_priv; + host->flags |= hflags; + +#ifdef CONFIG_ATA_BMDMA + if (bmdma) { + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); } else - probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY; +#endif + rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); +out: + if (rc == 0) + devres_remove_group(&pdev->dev, NULL); + else + devres_release_group(&pdev->dev, NULL); - return probe_ent; + return rc; } - /** - * ata_pci_init_one - Initialize/register PCI IDE host controller + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller * @pdev: Controller to be initialized - * @port_info: Information from low-level host driver - * @n_ports: Number of ports attached to host controller + * @ppi: array of port_info, must be enough for two ports + * @sht: scsi_host_template to use when registering the host + * @host_priv: host private_data + * @hflag: host flags * * This is a helper function which can be called from a driver's * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers. - * - * This function calls pci_enable_device(), reserves its register - * regions, sets the dma mask, enables bus master mode, and calls - * ata_device_add() + * IDE taskfile registers and is PIO only. * * ASSUMPTION: * Nobody makes a single channel controller that appears solely as @@ -940,158 +2575,586 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, * RETURNS: * Zero on success, negative on errno-based value on error. */ - -int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, - unsigned int n_ports) +int ata_pci_sff_init_one(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, int hflag) { - struct ata_probe_ent *probe_ent = NULL; - struct ata_port_info *port[2]; - u8 tmp8, mask; - unsigned int legacy_mode = 0; - int disable_dev_on_err = 1; - int rc; + return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0); +} +EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); - DPRINTK("ENTER\n"); +#endif /* CONFIG_PCI */ - port[0] = port_info[0]; - if (n_ports > 1) - port[1] = port_info[1]; - else - port[1] = port[0]; +/* + * BMDMA support + */ - if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0 - && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { - /* TODO: What if one channel is in native mode ... */ - pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); - mask = (1 << 2) | (1 << 0); - if ((tmp8 & mask) != mask) - legacy_mode = (1 << 3); - } +#ifdef CONFIG_ATA_BMDMA - /* FIXME... 
*/ - if ((!legacy_mode) && (n_ports > 2)) { - printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n"); - n_ports = 2; - /* For now */ - } +const struct ata_port_operations ata_bmdma_port_ops = { + .inherits = &ata_sff_port_ops, - /* FIXME: Really for ATA it isn't safe because the device may be - multi-purpose and we want to leave it alone if it was already - enabled. Secondly for shared use as Arjan says we want refcounting + .error_handler = ata_bmdma_error_handler, + .post_internal_cmd = ata_bmdma_post_internal_cmd, - Checking dev->is_enabled is insufficient as this is not set at - boot for the primary video which is BIOS enabled - */ + .qc_prep = ata_bmdma_qc_prep, + .qc_issue = ata_bmdma_qc_issue, - rc = pci_enable_device(pdev); - if (rc) - return rc; + .sff_irq_clear = ata_bmdma_irq_clear, + .bmdma_setup = ata_bmdma_setup, + .bmdma_start = ata_bmdma_start, + .bmdma_stop = ata_bmdma_stop, + .bmdma_status = ata_bmdma_status, - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - disable_dev_on_err = 0; - goto err_out; + .port_start = ata_bmdma_port_start, +}; +EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); + +const struct ata_port_operations ata_bmdma32_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .sff_data_xfer = ata_sff_data_xfer32, + .port_start = ata_bmdma_port_start32, +}; +EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); + +/** + * ata_bmdma_fill_sg - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. 
+ */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + prd[pi].addr = cpu_to_le32(addr); + prd[pi].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; + } } - if (legacy_mode) { - if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) { - struct resource *conflict, res; - res.start = ATA_PRIMARY_CMD; - res.end = ATA_PRIMARY_CMD + 8 - 1; - conflict = ____request_resource(&ioport_resource, &res); - while (conflict->child) - conflict = ____request_resource(conflict, &res); - if (!strcmp(conflict->name, "libata")) - legacy_mode |= ATA_PORT_PRIMARY; - else { - disable_dev_on_err = 0; - printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \ - "ata: conflict with %s\n", - ATA_PRIMARY_CMD, - conflict->name); - } - } else - legacy_mode |= ATA_PORT_PRIMARY; - - if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) { - struct resource *conflict, res; - res.start = ATA_SECONDARY_CMD; - res.end = ATA_SECONDARY_CMD + 8 - 1; - conflict = ____request_resource(&ioport_resource, &res); - while (conflict->child) - conflict = ____request_resource(conflict, &res); - if (!strcmp(conflict->name, "libata")) - legacy_mode |= ATA_PORT_SECONDARY; - else { - disable_dev_on_err = 0; - printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \ - "ata: conflict with %s\n", - ATA_SECONDARY_CMD, - conflict->name); + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. Perform the fill + * so that we avoid writing any length 64K records for + * controllers that don't follow the spec. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len, blen; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + blen = len & 0xffff; + prd[pi].addr = cpu_to_le32(addr); + if (blen == 0) { + /* Some PATA chipsets like the CS5530 can't + cope with 0x0000 meaning 64K as the spec + says */ + prd[pi].flags_len = cpu_to_le32(0x8000); + blen = 0x8000; + prd[++pi].addr = cpu_to_le32(addr + 0x8000); } - } else - legacy_mode |= ATA_PORT_SECONDARY; + prd[pi].flags_len = cpu_to_le32(blen); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; + } + } + + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_bmdma_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. 
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + ata_bmdma_fill_sg(qc); +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); + +/** + * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + ata_bmdma_fill_sg_dumb(qc); +} +EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); + +/** + * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller + * @qc: command to issue to device + * + * This function issues a PIO, NODATA or DMA command to a + * SFF/BMDMA controller. PIO and NODATA are handled by + * ata_sff_qc_issue(). + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Zero on success, AC_ERR_* mask on failure + */ +unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; + + /* defer PIO handling to sff_qc_issue */ + if (!ata_is_dma(qc->tf.protocol)) + return ata_sff_qc_issue(qc); + + /* select the device */ + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* set up bmdma */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + ap->hsm_task_state = HSM_ST_LAST; + break; + + case ATAPI_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* set up bmdma */ + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + ata_sff_queue_pio_task(link, 0); + break; + + default: + WARN_ON(1); + return AC_ERR_SYSTEM; } - /* we have legacy mode, but all ports are unavailable */ - if (legacy_mode == (1 << 3)) { - rc = -EBUSY; - goto err_out_regions; + return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); + +/** + * ata_bmdma_port_intr - Handle BMDMA port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle port interrupt for given queued command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). + */ +unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 host_stat = 0; + bool bmdma_stopped = false; + unsigned int handled; + + if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + + /* if it's not our irq... 
*/ + if (!(host_stat & ATA_DMA_INTR)) + return ata_sff_idle_irq(ap); + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + bmdma_stopped = true; + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transferring data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } } - /* FIXME: If we get no DMA mask we should fall back to PIO */ - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; + handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); - if (legacy_mode) { - probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode); - } else { - if (n_ports == 2) - probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); - else - probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY); + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + + return handled; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); + +/** + * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_bmdma_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); +} +EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); + +/** + * ata_bmdma_error_handler - Stock error handler for BMDMA controller + * @ap: port to handle error for + * + * Stock error handler for BMDMA controller. It can handle both + * PATA and SATA controllers. Most BMDMA controllers should be + * able to use this EH as-is or with some added handling before + * and after. + * + * LOCKING: + * Kernel thread context (may sleep) + */ +void ata_bmdma_error_handler(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + bool thaw = false; + + qc = __ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) + qc = NULL; + + /* reset PIO HSM and stop DMA engine */ + spin_lock_irqsave(ap->lock, flags); + + if (qc && ata_is_dma(qc->tf.protocol)) { + u8 host_stat; + + host_stat = ap->ops->bmdma_status(ap); + + /* BMDMA controllers indicate host bus error by + * setting DMA_ERR bit and timing out. As it wasn't + * really a timeout event, adjust error mask and + * cancel frozen state. 
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = true;
+		}
+
+		ap->ops->bmdma_stop(qc);
+
+		/* if we're going to thaw, make sure IRQ is clear */
+		if (thaw) {
+			ap->ops->sff_check_status(ap);
+			if (ap->ops->sff_irq_clear)
+				ap->ops->sff_irq_clear(ap);
+		}
 	}
-	if (!probe_ent) {
-		rc = -ENOMEM;
-		goto err_out_regions;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		spin_lock_irqsave(ap->lock, flags);
+		ap->ops->bmdma_stop(qc);
+		spin_unlock_irqrestore(ap->lock, flags);
 	}
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
+ * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
 
-	pci_set_master(pdev);
+	if (!mmio)
+		return;
 
-	/* FIXME: check ata_device_add return */
-	ata_device_add(probe_ent);
+	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
 
-	kfree(probe_ent);
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue an ioread8() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * expects, so I think it is best not to add a readb()
+	 * without first checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
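+	 *
+	 * (If such a flush read were ever wanted, a minimal and purely
+	 * hypothetical sketch would be a dummy read to force the posted
+	 * write out:
+	 *
+	 *	(void)ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	 *
+	 * the code deliberately does not do this, for the reasons above.)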
+ * + * FIXME: The posting of this write means I/O starts are + * unnecessarily delayed for MMIO + */ +} +EXPORT_SYMBOL_GPL(ata_bmdma_start); + +/** + * ata_bmdma_stop - Stop PCI IDE BMDMA transfer + * @qc: Command we are ending DMA for + * + * Clears the ATA_DMA_START flag in the dma control register + * + * May be used as the bmdma_stop() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, + mmio + ATA_DMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); +} +EXPORT_SYMBOL_GPL(ata_bmdma_stop); + +/** + * ata_bmdma_status - Read PCI IDE BMDMA status + * @ap: Port associated with this ATA transaction. + * + * Read and return BMDMA status register. + * + * May be used as the bmdma_status() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +u8 ata_bmdma_status(struct ata_port *ap) +{ + return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); +} +EXPORT_SYMBOL_GPL(ata_bmdma_status); + + +/** + * ata_bmdma_port_start - Set port up for bmdma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Allocates space for PRD table. + * + * May be used as the port_start() entry in ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start(struct ata_port *ap) +{ + if (ap->mwdma_mask || ap->udma_mask) { + ap->bmdma_prd = + dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, + &ap->bmdma_prd_dma, GFP_KERNEL); + if (!ap->bmdma_prd) + return -ENOMEM; + } return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_start); -err_out_regions: - if (legacy_mode & ATA_PORT_PRIMARY) - release_region(ATA_PRIMARY_CMD, 8); - if (legacy_mode & ATA_PORT_SECONDARY) - release_region(ATA_SECONDARY_CMD, 8); - pci_release_regions(pdev); -err_out: - if (disable_dev_on_err) - pci_disable_device(pdev); - return rc; +/** + * ata_bmdma_port_start32 - Set port up for dma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Enables 32bit PIO and allocates space for PRD + * table. + * + * May be used as the port_start() entry in ata_port_operations for + * devices that are capable of 32bit PIO. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start32(struct ata_port *ap) +{ + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; + return ata_bmdma_port_start(ap); } +EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); + +#ifdef CONFIG_PCI /** - * ata_pci_clear_simplex - attempt to kick device out of simplex + * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex * @pdev: PCI device * * Some PCI ATA devices report simplex mode but in fact can be told to - * enter non simplex mode. This implements the neccessary logic to + * enter non simplex mode. This implements the necessary logic to * perform the task on such devices. Calling it on other devices will * have -undefined- behaviour. 
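+ *
+ * (A simplex controller shares a single DMA engine between the two
+ * channels, so only one channel can perform DMA at a time; the
+ * condition is reported by bit 7 of the BMDMA status register at
+ * BAR4 offset 2, the same bit ata_pci_bmdma_init() below tests.)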
*/ - -int ata_pci_clear_simplex(struct pci_dev *pdev) +int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) { unsigned long bmdma = pci_resource_start(pdev, 4); u8 simplex; @@ -1106,16 +3169,169 @@ int ata_pci_clear_simplex(struct pci_dev *pdev) return -EOPNOTSUPP; return 0; } +EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); + +static void ata_bmdma_nodma(struct ata_host *host, const char *reason) +{ + int i; + + dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason); + + for (i = 0; i < 2; i++) { + host->ports[i]->mwdma_mask = 0; + host->ports[i]->udma_mask = 0; + } +} + +/** + * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host + * @host: target ATA host + * + * Acquire PCI BMDMA resources and initialize @host accordingly. + * + * LOCKING: + * Inherited from calling layer (may sleep). + */ +void ata_pci_bmdma_init(struct ata_host *host) +{ + struct device *gdev = host->dev; + struct pci_dev *pdev = to_pci_dev(gdev); + int i, rc; + + /* No BAR4 allocation: No DMA */ + if (pci_resource_start(pdev, 4) == 0) { + ata_bmdma_nodma(host, "BAR4 is zero"); + return; + } -unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask) + /* + * Some controllers require BMDMA region to be initialized + * even if DMA is not in use to clear IRQ status via + * ->sff_irq_clear method. Try to initialize bmdma_addr + * regardless of dma masks. + */ + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, "failed to set dma mask"); + if (!rc) { + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, + "failed to set consistent dma mask"); + } + + /* request and iomap DMA region */ + rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); + if (rc) { + ata_bmdma_nodma(host, "failed to request/iomap BAR4"); + return; + } + host->iomap = pcim_iomap_table(pdev); + + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *bmdma = host->iomap[4] + 8 * i; + + if (ata_port_is_dummy(ap)) + continue; + + ap->ioaddr.bmdma_addr = bmdma; + if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && + (ioread8(bmdma + 2) & 0x80)) + host->flags |= ATA_HOST_SIMPLEX; + + ata_port_desc(ap, "bmdma 0x%llx", + (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); + } +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); + +/** + * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host + * + * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI + * resources and initialize it accordingly in one go. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
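+ *
+ * A minimal probe sketch (hypothetical, error unwinding elided):
+ *
+ *	struct ata_host *host;
+ *	int rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ *	if (rc)
+ *		return rc;
+ *	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+ *				 IRQF_SHARED, sht);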
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+			       const struct ata_port_info * const * ppi,
+			       struct ata_host **r_host)
 {
-	/* Filter out DMA modes if the device has been configured by
-	   the BIOS as PIO only */
+	int rc;
 
-	if (ap->ioaddr.bmdma_addr == 0)
-		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-	return xfer_mask;
+	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+	if (rc)
+		return rc;
+
+	ata_pci_bmdma_init(*r_host);
+	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflags: host flags
+ *
+ * This function is similar to ata_pci_sff_init_one() but also
+ * takes care of BMDMA initialization.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+			   const struct ata_port_info * const * ppi,
+			   struct scsi_host_template *sht, void *host_priv,
+			   int hflags)
+{
+	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
 #endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ * ata_sff_port_init - Initialize SFF/BMDMA ATA port
+ * @ap: Port to initialize
+ *
+ * Called on port allocation to initialize SFF/BMDMA specific
+ * fields.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_sff_port_init(struct ata_port *ap)
+{
+	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
+	ap->ctl = ATA_DEVCTL_OBS;
+	ap->last_ctl = 0xFF;
+}
+int __init ata_sff_init(void)
+{
+	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+	if (!ata_sff_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ata_sff_exit(void)
+{
+	destroy_workqueue(ata_sff_wq);
+}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
new file mode 100644
index 00000000000..e3741322822
--- /dev/null
+++ b/drivers/ata/libata-transport.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2008 ioogle, Inc.  All rights reserved.
+ * Released under GPL v2.
+ *
+ * Libata transport class.
+ *
+ * The ATA transport class contains common code to deal with ATA HBAs,
+ * an approximated representation of ATA topologies in the driver model,
+ * and various sysfs attributes to expose these topologies and management
+ * interfaces to user-space.
+ *
+ * There are 3 objects defined in this class:
+ * - ata_port
+ * - ata_link
+ * - ata_device
+ * Each port has a link object. Each link can have up to two devices for PATA
+ * and generally one for SATA.
+ * If there is a SATA port multiplier [PMP], 15 additional ata_link objects
+ * are created.
+ *
+ * These objects are created when the ata host is initialized and when a PMP is
+ * found. They are removed only when the HBA is removed, and cleaned up before
+ * the error handler runs.
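+ *
+ * For example (names are illustrative), a disk on the host link of
+ * the first port shows up in the device tree as:
+ *
+ *	ata1          (ata_port)
+ *	  link1       (ata_link)
+ *	    dev1.0    (ata_device)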
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <scsi/scsi_transport.h>
+#include <linux/libata.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+#define ATA_PORT_ATTRS		3
+#define ATA_LINK_ATTRS		3
+#define ATA_DEV_ATTRS		9
+
+struct scsi_transport_template;
+struct scsi_transport_template *ata_scsi_transport_template;
+
+struct ata_internal {
+	struct scsi_transport_template t;
+
+	struct device_attribute private_port_attrs[ATA_PORT_ATTRS];
+	struct device_attribute private_link_attrs[ATA_LINK_ATTRS];
+	struct device_attribute private_dev_attrs[ATA_DEV_ATTRS];
+
+	struct transport_container link_attr_cont;
+	struct transport_container dev_attr_cont;
+
+	/*
+	 * The array of null-terminated pointers to attributes
+	 * needed by scsi_sysfs.c
+	 */
+	struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1];
+	struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1];
+	struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1];
+};
+#define to_ata_internal(tmpl)	container_of(tmpl, struct ata_internal, t)
+
+
+#define tdev_to_device(d) \
+	container_of((d), struct ata_device, tdev)
+#define transport_class_to_dev(dev) \
+	tdev_to_device((dev)->parent)
+
+#define tdev_to_link(d) \
+	container_of((d), struct ata_link, tdev)
+#define transport_class_to_link(dev) \
+	tdev_to_link((dev)->parent)
+
+#define tdev_to_port(d) \
+	container_of((d), struct ata_port, tdev)
+#define transport_class_to_port(dev) \
+	tdev_to_port((dev)->parent)
+
+
+/* Device objects are always created with link objects */
+static int ata_tdev_add(struct ata_device *dev);
+static void ata_tdev_delete(struct ata_device *dev);
+
+
+/*
+ * Hack to allow attributes of the same name in different objects.
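+ * E.g. ATA_DEVICE_ATTR(dev, class, ...) below would declare a symbol
+ * named device_attr_dev_class, so ports, links and devices can each
+ * expose a "class" attribute without the symbol names colliding.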
+ */ +#define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ + struct device_attribute device_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +#define ata_bitfield_name_match(title, table) \ +static ssize_t \ +get_ata_##title##_names(u32 table_key, char *buf) \ +{ \ + char *prefix = ""; \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value & table_key) { \ + len += sprintf(buf + len, "%s%s", \ + prefix, table[i].name); \ + prefix = ", "; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +#define ata_bitfield_name_search(title, table) \ +static ssize_t \ +get_ata_##title##_names(u32 table_key, char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value == table_key) { \ + len += sprintf(buf + len, "%s", \ + table[i].name); \ + break; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +static struct { + u32 value; + char *name; +} ata_class_names[] = { + { ATA_DEV_UNKNOWN, "unknown" }, + { ATA_DEV_ATA, "ata" }, + { ATA_DEV_ATA_UNSUP, "ata" }, + { ATA_DEV_ATAPI, "atapi" }, + { ATA_DEV_ATAPI_UNSUP, "atapi" }, + { ATA_DEV_PMP, "pmp" }, + { ATA_DEV_PMP_UNSUP, "pmp" }, + { ATA_DEV_SEMB, "semb" }, + { ATA_DEV_SEMB_UNSUP, "semb" }, + { ATA_DEV_NONE, "none" } +}; +ata_bitfield_name_search(class, ata_class_names) + + +static struct { + u32 value; + char *name; +} ata_err_names[] = { + { AC_ERR_DEV, "DeviceError" }, + { AC_ERR_HSM, "HostStateMachineError" }, + { AC_ERR_TIMEOUT, "Timeout" }, + { AC_ERR_MEDIA, "MediaError" }, + { AC_ERR_ATA_BUS, "BusError" }, + { AC_ERR_HOST_BUS, "HostBusError" }, + { AC_ERR_SYSTEM, "SystemError" }, + { AC_ERR_INVALID, "InvalidArg" }, + { AC_ERR_OTHER, "Unknown" }, + { AC_ERR_NODEV_HINT, "NoDeviceHint" }, + { AC_ERR_NCQ, "NCQError" } +}; +ata_bitfield_name_match(err, ata_err_names) + +static struct { + u32 value; + char *name; +} ata_xfer_names[] = { + { XFER_UDMA_7, "XFER_UDMA_7" }, + { XFER_UDMA_6, "XFER_UDMA_6" }, + { XFER_UDMA_5, "XFER_UDMA_5" }, + { XFER_UDMA_4, "XFER_UDMA_4" }, + { XFER_UDMA_3, "XFER_UDMA_3" }, + { XFER_UDMA_2, "XFER_UDMA_2" }, + { XFER_UDMA_1, "XFER_UDMA_1" }, + { XFER_UDMA_0, "XFER_UDMA_0" }, + { XFER_MW_DMA_4, "XFER_MW_DMA_4" }, + { XFER_MW_DMA_3, "XFER_MW_DMA_3" }, + { XFER_MW_DMA_2, "XFER_MW_DMA_2" }, + { XFER_MW_DMA_1, "XFER_MW_DMA_1" }, + { XFER_MW_DMA_0, "XFER_MW_DMA_0" }, + { XFER_SW_DMA_2, "XFER_SW_DMA_2" }, + { XFER_SW_DMA_1, "XFER_SW_DMA_1" }, + { XFER_SW_DMA_0, "XFER_SW_DMA_0" }, + { XFER_PIO_6, "XFER_PIO_6" }, + { XFER_PIO_5, "XFER_PIO_5" }, + { XFER_PIO_4, "XFER_PIO_4" }, + { XFER_PIO_3, "XFER_PIO_3" }, + { XFER_PIO_2, "XFER_PIO_2" }, + { XFER_PIO_1, "XFER_PIO_1" }, + { XFER_PIO_0, "XFER_PIO_0" }, + { XFER_PIO_SLOW, "XFER_PIO_SLOW" } +}; +ata_bitfield_name_match(xfer,ata_xfer_names) + +/* + * ATA Port attributes + */ +#define ata_port_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_ata_port_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_port *ap = transport_class_to_port(dev); \ + \ + return snprintf(buf, 20, format_string, cast ap->field); \ +} + +#define ata_port_simple_attr(field, name, format_string, type) \ + ata_port_show_simple(field, name, format_string, (type)) \ +static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL) + +ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int); +ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long); 
+ata_port_simple_attr(local_port_no, port_no, "%u\n", unsigned int);
+
+static DECLARE_TRANSPORT_CLASS(ata_port_class,
+			       "ata_port", NULL, NULL, NULL);
+
+static void ata_tport_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ata_is_port --  check if a struct device represents an ATA port
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA port, %0 else
+ */
+static int ata_is_port(const struct device *dev)
+{
+	return dev->release == ata_tport_release;
+}
+
+static int ata_tport_match(struct attribute_container *cont,
+			   struct device *dev)
+{
+	if (!ata_is_port(dev))
+		return 0;
+	return &ata_scsi_transport_template->host_attrs.ac == cont;
+}
+
+/**
+ * ata_tport_delete  --  remove ATA PORT
+ * @ap:	ATA PORT to remove
+ *
+ * Removes the specified ATA PORT, and the associated link as well.
+ */
+void ata_tport_delete(struct ata_port *ap)
+{
+	struct device *dev = &ap->tdev;
+
+	ata_tlink_delete(&ap->link);
+
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
+	put_device(dev);
+}
+
+/**
+ * ata_tport_add  --  initialize a transport ATA port structure
+ * @parent:	parent device
+ * @ap:		existing ata_port structure
+ *
+ * Initialize an ATA port structure for sysfs. It will be added to the device
+ * tree below the device specified by @parent which could be a PCI device.
+ *
+ * Returns %0 on success
+ */
+int ata_tport_add(struct device *parent,
+		  struct ata_port *ap)
+{
+	int error;
+	struct device *dev = &ap->tdev;
+
+	device_initialize(dev);
+	dev->type = &ata_port_type;
+
+	dev->parent = get_device(parent);
+	dev->release = ata_tport_release;
+	dev_set_name(dev, "ata%d", ap->print_id);
+	transport_setup_device(dev);
+	ata_acpi_bind_port(ap);
+	error = device_add(dev);
+	if (error) {
+		goto tport_err;
+	}
+
+	device_enable_async_suspend(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_forbid(dev);
+
+	transport_add_device(dev);
+	transport_configure_device(dev);
+
+	error = ata_tlink_add(&ap->link);
+	if (error) {
+		goto tport_link_err;
+	}
+	return 0;
+
+ tport_link_err:
+	transport_remove_device(dev);
+	device_del(dev);
+
+ tport_err:
+	transport_destroy_device(dev);
+	put_device(dev);
+	return error;
+}
+
+
+/*
+ * ATA link attributes
+ */
+static int noop(int x) { return x; }
+
+#define ata_link_show_linkspeed(field, format)			        \
+static ssize_t								\
+show_ata_link_##field(struct device *dev,				\
+		      struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_link *link = transport_class_to_link(dev);		\
+									\
+	return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
+}
+
+#define ata_link_linkspeed_attr(field, format)				\
+	ata_link_show_linkspeed(field, format)				\
+static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
+
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
+
+
+static DECLARE_TRANSPORT_CLASS(ata_link_class,
+		"ata_link", NULL, NULL, NULL);
+
+static void ata_tlink_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ata_is_link --  check if a struct device represents an ATA link
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA link, %0 else
+ */
+static int ata_is_link(const struct device *dev)
+{
+	return dev->release == ata_tlink_release;
+}
+
+static int ata_tlink_match(struct attribute_container *cont,
+			   struct device *dev)
+{
+	struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+	if (!ata_is_link(dev))
+		return 0;
+	return &i->link_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tlink_delete  --  remove ATA LINK
+ * @link:	ATA LINK to remove
+ *
+ * Removes the specified ATA LINK, and the associated ATA device(s) as well.
+ */
+void ata_tlink_delete(struct ata_link *link)
+{
+	struct device *dev = &link->tdev;
+	struct ata_device *ata_dev;
+
+	ata_for_each_dev(ata_dev, link, ALL) {
+		ata_tdev_delete(ata_dev);
+	}
+
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
+	put_device(dev);
+}
+
+/**
+ * ata_tlink_add  --  initialize a transport ATA link structure
+ * @link:	allocated ata_link structure.
+ *
+ * Initialize an ATA LINK structure for sysfs. It will be added in the
+ * device tree below the ATA PORT it belongs to.
+ *
+ * Returns %0 on success
+ */
+int ata_tlink_add(struct ata_link *link)
+{
+	struct device *dev = &link->tdev;
+	struct ata_port *ap = link->ap;
+	struct ata_device *ata_dev;
+	int error;
+
+	device_initialize(dev);
+	dev->parent = get_device(&ap->tdev);
+	dev->release = ata_tlink_release;
+	if (ata_is_host_link(link))
+		dev_set_name(dev, "link%d", ap->print_id);
+	else
+		dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
+
+	transport_setup_device(dev);
+
+	error = device_add(dev);
+	if (error) {
+		goto tlink_err;
+	}
+
+	transport_add_device(dev);
+	transport_configure_device(dev);
+
+	ata_for_each_dev(ata_dev, link, ALL) {
+		error = ata_tdev_add(ata_dev);
+		if (error) {
+			goto tlink_dev_err;
+		}
+	}
+	return 0;
+  tlink_dev_err:
+	while (--ata_dev >= link->device) {
+		ata_tdev_delete(ata_dev);
+	}
+	transport_remove_device(dev);
+	device_del(dev);
+  tlink_err:
+	transport_destroy_device(dev);
+	put_device(dev);
+	return error;
+}
+
+/*
+ * ATA device attributes
+ */
+
+#define ata_dev_show_class(title, field)				\
+static ssize_t								\
+show_ata_dev_##field(struct device *dev,				\
+		     struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_device *ata_dev = transport_class_to_dev(dev);	\
+									\
+	return get_ata_##title##_names(ata_dev->field, buf);		\
+}
+
+#define ata_dev_attr(title, field)					\
+	ata_dev_show_class(title, field)				\
+static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL)
+
+ata_dev_attr(class, class);
+ata_dev_attr(xfer, pio_mode);
+ata_dev_attr(xfer, dma_mode);
+ata_dev_attr(xfer, xfer_mode);
+
+
+#define ata_dev_show_simple(field, format_string, cast)			\
+static ssize_t								\
+show_ata_dev_##field(struct device *dev,				\
+		     struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_device *ata_dev = transport_class_to_dev(dev);	\
+									\
+	return snprintf(buf, 20, format_string, cast ata_dev->field);	\
+}
+
+#define ata_dev_simple_attr(field, format_string, type)	\
+	ata_dev_show_simple(field, format_string, (type))	\
+static DEVICE_ATTR(field, S_IRUGO,			\
+		   show_ata_dev_##field, NULL)
+
+ata_dev_simple_attr(spdn_cnt, "%d\n", int);
+
+struct ata_show_ering_arg {
+	char* buf;
+	int written;
+};
+
+static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
+{
+	struct ata_show_ering_arg* arg = void_arg;
+	struct timespec time;
+
+	jiffies_to_timespec(ent->timestamp,&time);
+	arg->written += sprintf(arg->buf + arg->written,
+				"[%5lu.%06lu]",
+				time.tv_sec, time.tv_nsec);
+	arg->written += get_ata_err_names(ent->err_mask,
+					  arg->buf + arg->written);
+	return 0;
+}
+
+static ssize_t
+show_ata_dev_ering(struct device *dev,
+		   struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	struct ata_show_ering_arg arg = { buf, 0 };
+
+	ata_ering_map(&ata_dev->ering, ata_show_ering, &arg);
+	return arg.written;
+}
+
+
+static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL);
+
+static ssize_t
+show_ata_dev_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	int written = 0, i = 0;
+
+	if (ata_dev->class == ATA_DEV_PMP)
+		return 0;
+	for(i=0;i<ATA_ID_WORDS;i++)  {
+		written += snprintf(buf+written, 20, "%04x%c",
+				    ata_dev->id[i],
+				    ((i+1) & 7) ? ' ' : '\n');
+	}
+	return written;
+}
+
+static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL);
+
+static ssize_t
+show_ata_dev_gscr(struct device *dev,
+		  struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	int written = 0, i = 0;
+
+	if (ata_dev->class != ATA_DEV_PMP)
+		return 0;
+	for(i=0;i<SATA_PMP_GSCR_DWORDS;i++)  {
+		written += snprintf(buf+written, 20, "%08x%c",
+				    ata_dev->gscr[i],
+				    ((i+1) & 3) ? ' ' : '\n');
+	}
+	if (SATA_PMP_GSCR_DWORDS & 3)
+		buf[written-1] = '\n';
+	return written;
+}
+
+static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
+
+static DECLARE_TRANSPORT_CLASS(ata_dev_class,
+			       "ata_device", NULL, NULL, NULL);
+
+static void ata_tdev_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ata_is_ata_dev  --  check if a struct device represents an ATA device
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA device, %0 else
+ */
+static int ata_is_ata_dev(const struct device *dev)
+{
+	return dev->release == ata_tdev_release;
+}
+
+static int ata_tdev_match(struct attribute_container *cont,
+			  struct device *dev)
+{
+	struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+	if (!ata_is_ata_dev(dev))
+		return 0;
+	return &i->dev_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tdev_free  --  free an ATA transport device
+ * @dev:	ATA device to free
+ *
+ * Frees the specified ATA device.
+ *
+ * Note:
+ *   This function must only be called on a device that has not
+ *   successfully been added using ata_tdev_add().
+ */
+static void ata_tdev_free(struct ata_device *dev)
+{
+	transport_destroy_device(&dev->tdev);
+	put_device(&dev->tdev);
+}
+
+/**
+ * ata_tdev_delete  --  remove ATA device
+ * @ata_dev:	ATA device to remove
+ *
+ * Removes the specified ATA device.
+ */
+static void ata_tdev_delete(struct ata_device *ata_dev)
+{
+	struct device *dev = &ata_dev->tdev;
+
+	transport_remove_device(dev);
+	device_del(dev);
+	ata_tdev_free(ata_dev);
+}
+
+
+/**
+ * ata_tdev_add  --  initialize a transport ATA device structure.
+ * @ata_dev:	ata_dev structure.
+ *
+ * Initialize an ATA device structure for sysfs. It will be added in the
+ * device tree below the ATA LINK device it belongs to.
+ * + * Returns %0 on success + */ +static int ata_tdev_add(struct ata_device *ata_dev) +{ + struct device *dev = &ata_dev->tdev; + struct ata_link *link = ata_dev->link; + struct ata_port *ap = link->ap; + int error; + + device_initialize(dev); + dev->parent = get_device(&link->tdev); + dev->release = ata_tdev_release; + if (ata_is_host_link(link)) + dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); + else + dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); + + transport_setup_device(dev); + ata_acpi_bind_dev(ata_dev); + error = device_add(dev); + if (error) { + ata_tdev_free(ata_dev); + return error; + } + + transport_add_device(dev); + transport_configure_device(dev); + return 0; +} + + +/* + * Setup / Teardown code + */ + +#define SETUP_TEMPLATE(attrb, field, perm, test) \ + i->private_##attrb[count] = dev_attr_##field; \ + i->private_##attrb[count].attr.mode = perm; \ + i->attrb[count] = &i->private_##attrb[count]; \ + if (test) \ + count++ + +#define SETUP_LINK_ATTRIBUTE(field) \ + SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1) + +#define SETUP_PORT_ATTRIBUTE(field) \ + SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) + +#define SETUP_DEV_ATTRIBUTE(field) \ + SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1) + +/** + * ata_attach_transport -- instantiate ATA transport template + */ +struct scsi_transport_template *ata_attach_transport(void) +{ + struct ata_internal *i; + int count; + + i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL); + if (!i) + return NULL; + + i->t.eh_strategy_handler = ata_scsi_error; + i->t.eh_timed_out = ata_scsi_timed_out; + i->t.user_scan = ata_scsi_user_scan; + + i->t.host_attrs.ac.attrs = &i->port_attrs[0]; + i->t.host_attrs.ac.class = &ata_port_class.class; + i->t.host_attrs.ac.match = ata_tport_match; + transport_container_register(&i->t.host_attrs); + + i->link_attr_cont.ac.class = &ata_link_class.class; + i->link_attr_cont.ac.attrs = &i->link_attrs[0]; + i->link_attr_cont.ac.match = ata_tlink_match; + transport_container_register(&i->link_attr_cont); + + i->dev_attr_cont.ac.class = &ata_dev_class.class; + i->dev_attr_cont.ac.attrs = &i->dev_attrs[0]; + i->dev_attr_cont.ac.match = ata_tdev_match; + transport_container_register(&i->dev_attr_cont); + + count = 0; + SETUP_PORT_ATTRIBUTE(nr_pmp_links); + SETUP_PORT_ATTRIBUTE(idle_irq); + SETUP_PORT_ATTRIBUTE(port_no); + BUG_ON(count > ATA_PORT_ATTRS); + i->port_attrs[count] = NULL; + + count = 0; + SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit); + SETUP_LINK_ATTRIBUTE(sata_spd_limit); + SETUP_LINK_ATTRIBUTE(sata_spd); + BUG_ON(count > ATA_LINK_ATTRS); + i->link_attrs[count] = NULL; + + count = 0; + SETUP_DEV_ATTRIBUTE(class); + SETUP_DEV_ATTRIBUTE(pio_mode); + SETUP_DEV_ATTRIBUTE(dma_mode); + SETUP_DEV_ATTRIBUTE(xfer_mode); + SETUP_DEV_ATTRIBUTE(spdn_cnt); + SETUP_DEV_ATTRIBUTE(ering); + SETUP_DEV_ATTRIBUTE(id); + SETUP_DEV_ATTRIBUTE(gscr); + BUG_ON(count > ATA_DEV_ATTRS); + i->dev_attrs[count] = NULL; + + return &i->t; +} + +/** + * ata_release_transport -- release ATA transport template instance + * @t: transport template instance + */ +void ata_release_transport(struct scsi_transport_template *t) +{ + struct ata_internal *i = to_ata_internal(t); + + transport_container_unregister(&i->t.host_attrs); + transport_container_unregister(&i->link_attr_cont); + transport_container_unregister(&i->dev_attr_cont); + + kfree(i); +} + +__init int libata_transport_init(void) +{ + int error; + + error = transport_class_register(&ata_link_class); + if (error) + goto out_unregister_transport; + error = 
transport_class_register(&ata_port_class); + if (error) + goto out_unregister_link; + error = transport_class_register(&ata_dev_class); + if (error) + goto out_unregister_port; + return 0; + + out_unregister_port: + transport_class_unregister(&ata_port_class); + out_unregister_link: + transport_class_unregister(&ata_link_class); + out_unregister_transport: + return error; + +} + +void __exit libata_transport_exit(void) +{ + transport_class_unregister(&ata_link_class); + transport_class_unregister(&ata_port_class); + transport_class_unregister(&ata_dev_class); +} diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h new file mode 100644 index 00000000000..2820cf864f1 --- /dev/null +++ b/drivers/ata/libata-transport.h @@ -0,0 +1,18 @@ +#ifndef _LIBATA_TRANSPORT_H +#define _LIBATA_TRANSPORT_H + + +extern struct scsi_transport_template *ata_scsi_transport_template; + +int ata_tlink_add(struct ata_link *link); +void ata_tlink_delete(struct ata_link *link); + +int ata_tport_add(struct device *parent, struct ata_port *ap); +void ata_tport_delete(struct ata_port *ap); + +struct scsi_transport_template *ata_attach_transport(void); +void ata_release_transport(struct scsi_transport_template *t); + +__init int libata_transport_init(void); +void __exit libata_transport_exit(void); +#endif diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c new file mode 100644 index 00000000000..f3a65a3140d --- /dev/null +++ b/drivers/ata/libata-zpodd.c @@ -0,0 +1,283 @@ +#include <linux/libata.h> +#include <linux/cdrom.h> +#include <linux/pm_runtime.h> +#include <linux/module.h> +#include <linux/pm_qos.h> +#include <scsi/scsi_device.h> + +#include "libata.h" + +static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */ +module_param(zpodd_poweroff_delay, int, 0644); +MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds"); + +enum odd_mech_type { + ODD_MECH_TYPE_SLOT, + ODD_MECH_TYPE_DRAWER, + ODD_MECH_TYPE_UNSUPPORTED, +}; + +struct zpodd { + enum odd_mech_type mech_type; /* init during probe, RO afterwards */ + struct ata_device *dev; + + /* The following fields are synchronized by PM core. 
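	 * (they are only touched from the suspend/resume paths below,
	 * e.g. zpodd_on_suspend() and zpodd_post_poweron(), which the
	 * PM core never runs concurrently for a single device)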
*/ + bool from_notify; /* resumed as a result of + * acpi wake notification */ + bool zp_ready; /* ZP ready state */ + unsigned long last_ready; /* last ZP ready timestamp */ + bool zp_sampled; /* ZP ready state sampled */ + bool powered_off; /* ODD is powered off + * during suspend */ +}; + +static int eject_tray(struct ata_device *dev) +{ + struct ata_taskfile tf; + const char cdb[] = { GPCMD_START_STOP_UNIT, + 0, 0, 0, + 0x02, /* LoEj */ + 0, 0, 0, 0, 0, 0, 0, + }; + + ata_tf_init(dev, &tf); + tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.command = ATA_CMD_PACKET; + tf.protocol = ATAPI_PROT_NODATA; + + return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); +} + +/* Per the spec, only slot type and drawer type ODD can be supported */ +static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) +{ + char buf[16]; + unsigned int ret; + struct rm_feature_desc *desc = (void *)(buf + 8); + struct ata_taskfile tf; + char cdb[] = { GPCMD_GET_CONFIGURATION, + 2, /* only 1 feature descriptor requested */ + 0, 3, /* 3, removable medium feature */ + 0, 0, 0,/* reserved */ + 0, sizeof(buf), + 0, 0, 0, + }; + + ata_tf_init(dev, &tf); + tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.command = ATA_CMD_PACKET; + tf.protocol = ATAPI_PROT_PIO; + tf.lbam = sizeof(buf); + + ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, + buf, sizeof(buf), 0); + if (ret) + return ODD_MECH_TYPE_UNSUPPORTED; + + if (be16_to_cpu(desc->feature_code) != 3) + return ODD_MECH_TYPE_UNSUPPORTED; + + if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) + return ODD_MECH_TYPE_SLOT; + else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) + return ODD_MECH_TYPE_DRAWER; + else + return ODD_MECH_TYPE_UNSUPPORTED; +} + +/* Test if ODD is zero power ready by sense code */ +static bool zpready(struct ata_device *dev) +{ + u8 sense_key, *sense_buf; + unsigned int ret, asc, ascq, add_len; + struct zpodd *zpodd = dev->zpodd; + + ret = atapi_eh_tur(dev, &sense_key); + + if (!ret || sense_key != NOT_READY) + return false; + + sense_buf = dev->link->ap->sector_buf; + ret = atapi_eh_request_sense(dev, sense_buf, sense_key); + if (ret) + return false; + + /* sense valid */ + if ((sense_buf[0] & 0x7f) != 0x70) + return false; + + add_len = sense_buf[7]; + /* has asc and ascq */ + if (add_len < 6) + return false; + + asc = sense_buf[12]; + ascq = sense_buf[13]; + + if (zpodd->mech_type == ODD_MECH_TYPE_SLOT) + /* no media inside */ + return asc == 0x3a; + else + /* no media inside and door closed */ + return asc == 0x3a && ascq == 0x01; +} + +/* + * Update the zpodd->zp_ready field. This field will only be set + * if the ODD has stayed in ZP ready state for zpodd_poweroff_delay + * time, and will be used to decide if power off is allowed. If it + * is set, it will be cleared during resume from powered off state. 
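+ *
+ * Example with the default 30 second poweroff delay (illustrative):
+ * the first suspend that finds the ODD ZP ready only records a
+ * timestamp in last_ready; a suspend attempted more than 30 seconds
+ * after that sample then sets zp_ready, allowing the power off.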
+ */
+void zpodd_on_suspend(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+	unsigned long expires;
+
+	if (!zpready(dev)) {
+		zpodd->zp_sampled = false;
+		zpodd->zp_ready = false;
+		return;
+	}
+
+	if (!zpodd->zp_sampled) {
+		zpodd->zp_sampled = true;
+		zpodd->last_ready = jiffies;
+		return;
+	}
+
+	expires = zpodd->last_ready +
+		  msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+	if (time_before(jiffies, expires))
+		return;
+
+	zpodd->zp_ready = true;
+}
+
+bool zpodd_zpready(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+	return zpodd->zp_ready;
+}
+
+/*
+ * Enable runtime wake capability through ACPI and set the powered_off flag.
+ * This flag will be used during resume to decide which operations to take.
+ *
+ * Also, media poll needs to be silenced, so that it doesn't bring the ODD
+ * back to full power state every few seconds.
+ */
+void zpodd_enable_run_wake(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+
+	sdev_disable_disk_events(dev->sdev);
+
+	zpodd->powered_off = true;
+	device_set_run_wake(&dev->tdev, true);
+	acpi_pm_device_run_wake(&dev->tdev, true);
+}
+
+/* Disable runtime wake capability if it is enabled */
+void zpodd_disable_run_wake(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+
+	if (zpodd->powered_off) {
+		acpi_pm_device_run_wake(&dev->tdev, false);
+		device_set_run_wake(&dev->tdev, false);
+	}
+}
+
+/*
+ * Post power on processing after the ODD has been recovered. If the
+ * ODD wasn't powered off during suspend, it doesn't do anything.
+ *
+ * For a drawer type ODD, if it is powered on because the user pressed
+ * the eject button, the tray needs to be ejected. This can only be done
+ * after the ODD has been recovered, i.e. the link is initialized and the
+ * device is able to process a NON_DATA PIO command, as eject needs to
+ * send a command for the ODD to process.
+ *
+ * The from_notify flag, set in the wake notification handler
+ * zpodd_wake_dev(), indicates whether the power on was triggered by the
+ * user's action.
+ *
+ * For both types of ODD, several fields need to be reset.
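+ *
+ * (Concretely, the function below clears powered_off, from_notify,
+ * zp_sampled and zp_ready, and re-enables media change polling via
+ * sdev_enable_disk_events().)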
+ */ +void zpodd_post_poweron(struct ata_device *dev) +{ + struct zpodd *zpodd = dev->zpodd; + + if (!zpodd->powered_off) + return; + + zpodd->powered_off = false; + + if (zpodd->from_notify) { + zpodd->from_notify = false; + if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER) + eject_tray(dev); + } + + zpodd->zp_sampled = false; + zpodd->zp_ready = false; + + sdev_enable_disk_events(dev->sdev); +} + +static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context) +{ + struct ata_device *ata_dev = context; + struct zpodd *zpodd = ata_dev->zpodd; + struct device *dev = &ata_dev->sdev->sdev_gendev; + + if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) { + zpodd->from_notify = true; + pm_runtime_resume(dev); + } +} + +static void ata_acpi_add_pm_notifier(struct ata_device *dev) +{ + acpi_handle handle = ata_dev_acpi_handle(dev); + acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, + zpodd_wake_dev, dev); +} + +static void ata_acpi_remove_pm_notifier(struct ata_device *dev) +{ + acpi_handle handle = ata_dev_acpi_handle(dev); + acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev); +} + +void zpodd_init(struct ata_device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(&dev->tdev); + enum odd_mech_type mech_type; + struct zpodd *zpodd; + + if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev)) + return; + + mech_type = zpodd_get_mech_type(dev); + if (mech_type == ODD_MECH_TYPE_UNSUPPORTED) + return; + + zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL); + if (!zpodd) + return; + + zpodd->mech_type = mech_type; + + ata_acpi_add_pm_notifier(dev); + zpodd->dev = dev; + dev->zpodd = zpodd; + dev_pm_qos_expose_flags(&dev->tdev, 0); +} + +void zpodd_exit(struct ata_device *dev) +{ + ata_acpi_remove_pm_notifier(dev); + kfree(dev->zpodd); + dev->zpodd = NULL; +} diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index a5ecb71390a..45b5ab3a95d 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -29,7 +29,7 @@ #define __LIBATA_H__ #define DRV_NAME "libata" -#define DRV_VERSION "2.00" /* must be exactly four chars */ +#define DRV_VERSION "3.00" /* must be exactly four chars */ struct ata_scsi_args { struct ata_device *dev; @@ -39,84 +39,219 @@ struct ata_scsi_args { }; /* libata-core.c */ -extern struct workqueue_struct *ata_aux_wq; -extern int atapi_enabled; -extern int atapi_dmadir; +enum { + /* flags for ata_dev_read_id() */ + ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */ + + /* selector for ata_down_xfermask_limit() */ + ATA_DNXFER_PIO = 0, /* speed down PIO */ + ATA_DNXFER_DMA = 1, /* speed down DMA */ + ATA_DNXFER_40C = 2, /* apply 40c cable limit */ + ATA_DNXFER_FORCE_PIO = 3, /* force PIO */ + ATA_DNXFER_FORCE_PIO0 = 4, /* force PIO0 */ + + ATA_DNXFER_QUIET = (1 << 31), +}; + +extern atomic_t ata_print_id; +extern int atapi_passthru16; extern int libata_fua; +extern int libata_noacpi; +extern int libata_allow_tpm; +extern struct device_type ata_port_type; +extern struct ata_link *ata_dev_phys_link(struct ata_device *dev); +extern void ata_force_cbl(struct ata_port *ap); +extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); +extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); -extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); -extern void ata_dev_disable(struct ata_device *dev); -extern void ata_port_flush_task(struct ata_port *ap); +extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, + u64 block, u32 
n_block, unsigned int tf_flags, + unsigned int tag); +extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); extern unsigned ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, void *buf, unsigned int buflen); + int dma_dir, void *buf, unsigned int buflen, + unsigned long timeout); +extern unsigned ata_exec_internal_sg(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, struct scatterlist *sg, + unsigned int n_elem, unsigned long timeout); extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); +extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, + int (*check_ready)(struct ata_link *link)); extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, - int post_reset, u16 *id); -extern int ata_dev_configure(struct ata_device *dev, int print_info); -extern int sata_down_spd_limit(struct ata_port *ap); -extern int sata_set_spd_needed(struct ata_port *ap); -extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0); -extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); + unsigned int flags, u16 *id); +extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); +extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, + unsigned int readid_flags); +extern int ata_dev_configure(struct ata_device *dev); +extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); +extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); +extern unsigned int ata_dev_set_feature(struct ata_device *dev, + u8 enable, u8 feature); +extern void ata_sg_clean(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc); extern void ata_qc_issue(struct ata_queued_cmd *qc); extern void __ata_qc_complete(struct ata_queued_cmd *qc); -extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); -extern void ata_dev_select(struct ata_port *ap, unsigned int device, - unsigned int wait, unsigned int can_sleep); +extern int atapi_check_dma(struct ata_queued_cmd *qc); extern void swap_buf_le16(u16 *buf, unsigned int buf_words); -extern int ata_flush_cache(struct ata_device *dev); +extern bool ata_phys_link_online(struct ata_link *link); +extern bool ata_phys_link_offline(struct ata_link *link); extern void ata_dev_init(struct ata_device *dev); +extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); +extern int sata_link_init_spd(struct ata_link *link); extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); -extern void ata_port_init(struct ata_port *ap, struct ata_host *host, - const struct ata_probe_ent *ent, unsigned int port_no); -extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev, - const struct ata_port_info *port); +extern struct ata_port *ata_port_alloc(struct ata_host *host); +extern const char *sata_spd_string(unsigned int spd); +extern int ata_port_probe(struct ata_port *ap); +extern void __ata_port_probe(struct ata_port *ap); +#define to_ata_port(d) container_of(d, struct ata_port, tdev) -/* libata-scsi.c */ -extern struct scsi_transport_template ata_scsi_transport_template; +/* libata-acpi.c */ +#ifdef CONFIG_ATA_ACPI +extern unsigned int ata_acpi_gtf_filter; +extern void ata_acpi_dissociate(struct ata_host *host); +extern int ata_acpi_on_suspend(struct ata_port *ap); +extern void ata_acpi_on_resume(struct 
ata_port *ap); +extern int ata_acpi_on_devcfg(struct ata_device *dev); +extern void ata_acpi_on_disable(struct ata_device *dev); +extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state); +extern void ata_acpi_bind_port(struct ata_port *ap); +extern void ata_acpi_bind_dev(struct ata_device *dev); +extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev); +#else +static inline void ata_acpi_dissociate(struct ata_host *host) { } +static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } +static inline void ata_acpi_on_resume(struct ata_port *ap) { } +static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; } +static inline void ata_acpi_on_disable(struct ata_device *dev) { } +static inline void ata_acpi_set_state(struct ata_port *ap, + pm_message_t state) { } +static inline void ata_acpi_bind_port(struct ata_port *ap) {} +static inline void ata_acpi_bind_dev(struct ata_device *dev) {} +#endif -extern void ata_scsi_scan_host(struct ata_port *ap); +/* libata-scsi.c */ +extern int ata_scsi_add_hosts(struct ata_host *host, + struct scsi_host_template *sht); +extern void ata_scsi_scan_host(struct ata_port *ap, int sync); extern int ata_scsi_offline_dev(struct ata_device *dev); -extern void ata_scsi_hotplug(void *data); -extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); - -extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); - -extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen); -extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *), - u8 asc, u8 ascq); -extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, - u8 sk, u8 asc, u8 ascq); -extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, - unsigned int (*actor) (struct ata_scsi_args *args, - u8 *rbuf, unsigned int buflen)); +extern void ata_scsi_media_change_notify(struct ata_device *dev); +extern void ata_scsi_hotplug(struct work_struct *work); extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); -extern void ata_scsi_dev_rescan(void *data); +extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_bus_probe(struct ata_port *ap); +extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, unsigned int lun); + /* libata-eh.c */ -extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); +extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); +extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); +extern void ata_eh_acquire(struct ata_port *ap); +extern void ata_eh_release(struct ata_port *ap); +extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern void ata_scsi_error(struct Scsi_Host *host); -extern void ata_port_wait_eh(struct ata_port *ap); +extern void 
ata_eh_fastdrain_timerfn(unsigned long arg); extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); +extern void ata_dev_disable(struct ata_device *dev); +extern void ata_eh_detach_dev(struct ata_device *dev); +extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, + unsigned int action); +extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, + unsigned int action); +extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, + u8 page, void *buf, unsigned int sectors); +extern void ata_eh_autopsy(struct ata_port *ap); +const char *ata_get_cmd_descript(u8 command); +extern void ata_eh_report(struct ata_port *ap); +extern int ata_eh_reset(struct ata_link *link, int classify, + ata_prereset_fn_t prereset, ata_reset_fn_t softreset, + ata_reset_fn_t hardreset, ata_postreset_fn_t postreset); +extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); +extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, + ata_postreset_fn_t postreset, + struct ata_link **r_failed_disk); +extern void ata_eh_finish(struct ata_port *ap); +extern int ata_ering_map(struct ata_ering *ering, + int (*map_fn)(struct ata_ering_entry *, void *), + void *arg); +extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key); +extern unsigned int atapi_eh_request_sense(struct ata_device *dev, + u8 *sense_buf, u8 dfl_sense_key); + +/* libata-pmp.c */ +#ifdef CONFIG_SATA_PMP +extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); +extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); +extern int sata_pmp_attach(struct ata_device *dev); +#else /* CONFIG_SATA_PMP */ +static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val) +{ + return -EINVAL; +} + +static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) +{ + return -EINVAL; +} + +static inline int sata_pmp_set_lpm(struct ata_link *link, + enum ata_lpm_policy policy, unsigned hints) +{ + return -EINVAL; +} + +static inline int sata_pmp_attach(struct ata_device *dev) +{ + return -EINVAL; +} +#endif /* CONFIG_SATA_PMP */ + +/* libata-sff.c */ +#ifdef CONFIG_ATA_SFF +extern void ata_sff_flush_pio_task(struct ata_port *ap); +extern void ata_sff_port_init(struct ata_port *ap); +extern int ata_sff_init(void); +extern void ata_sff_exit(void); +#else /* CONFIG_ATA_SFF */ +static inline void ata_sff_flush_pio_task(struct ata_port *ap) +{ } +static inline void ata_sff_port_init(struct ata_port *ap) +{ } +static inline int ata_sff_init(void) +{ return 0; } +static inline void ata_sff_exit(void) +{ } +#endif /* CONFIG_ATA_SFF */ + +/* libata-zpodd.c */ +#ifdef CONFIG_SATA_ZPODD +void zpodd_init(struct ata_device *dev); +void zpodd_exit(struct ata_device *dev); +static inline bool zpodd_dev_enabled(struct ata_device *dev) +{ + return dev->zpodd != NULL; +} +void zpodd_on_suspend(struct ata_device *dev); +bool zpodd_zpready(struct ata_device *dev); +void zpodd_enable_run_wake(struct ata_device *dev); +void zpodd_disable_run_wake(struct ata_device *dev); +void zpodd_post_poweron(struct ata_device *dev); +#else /* CONFIG_SATA_ZPODD */ +static inline void zpodd_init(struct ata_device *dev) {} +static inline void zpodd_exit(struct ata_device *dev) {} +static inline bool zpodd_dev_enabled(struct ata_device *dev) { return false; } +static inline void 
zpodd_on_suspend(struct ata_device *dev) {}
+static inline bool zpodd_zpready(struct ata_device *dev) { return false; }
+static inline void zpodd_enable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_disable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_post_poweron(struct ata_device *dev) {}
+#endif /* CONFIG_SATA_ZPODD */
 #endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
new file mode 100644
index 00000000000..b70fce2a38e
--- /dev/null
+++ b/drivers/ata/pata_acpi.c
@@ -0,0 +1,280 @@
+/*
+ *	ACPI PATA driver
+ *
+ *	(c) 2007 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/acpi.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME "pata_acpi"
+#define DRV_VERSION "0.2.3"
+
+struct pata_acpi {
+	struct ata_acpi_gtm gtm;
+	void *last;
+	unsigned long mask[2];
+};
+
+/**
+ *	pacpi_pre_reset	-	check for 40/80 pin
+ *	@link: ATA link to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pata_acpi *acpi = ap->private_data;
+	if (ACPI_HANDLE(&ap->tdev) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
+		return -ENODEV;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	pacpi_cable_detect - cable type detection
+ *	@ap: port to detect
+ *
+ *	Perform device-specific cable detection
+ */
+
+static int pacpi_cable_detect(struct ata_port *ap)
+{
+	struct pata_acpi *acpi = ap->private_data;
+
+	if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	pacpi_discover_modes	-	filter out non-ACPI modes
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Try the modes available and see which ones the ACPI method will
+ *	set up sensibly.
From this we get a mask of ACPI modes we can use + */ + +static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev) +{ + struct pata_acpi *acpi = ap->private_data; + struct ata_acpi_gtm probe; + unsigned int xfer_mask; + + probe = acpi->gtm; + + ata_acpi_gtm(ap, &probe); + + xfer_mask = ata_acpi_gtm_xfermask(adev, &probe); + + if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) + ap->cbl = ATA_CBL_PATA80; + + return xfer_mask; +} + +/** + * pacpi_mode_filter - mode filter for ACPI + * @adev: device + * @mask: mask of valid modes + * + * Filter the valid mode list according to our own specific rules, in + * this case the list of discovered valid modes obtained by ACPI probing + */ + +static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) +{ + struct pata_acpi *acpi = adev->link->ap->private_data; + return mask & acpi->mask[adev->devno]; +} + +/** + * pacpi_set_piomode - set initial PIO mode data + * @ap: ATA interface + * @adev: ATA device + */ + +static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + int unit = adev->devno; + struct pata_acpi *acpi = ap->private_data; + const struct ata_timing *t; + + if (!(acpi->gtm.flags & 0x10)) + unit = 0; + + /* Now stuff the nS values into the structure */ + t = ata_timing_find_mode(adev->pio_mode); + acpi->gtm.drive[unit].pio = t->cycle; + ata_acpi_stm(ap, &acpi->gtm); + /* See what mode we actually got */ + ata_acpi_gtm(ap, &acpi->gtm); +} + +/** + * pacpi_set_dmamode - set initial DMA mode data + * @ap: ATA interface + * @adev: ATA device + */ + +static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + int unit = adev->devno; + struct pata_acpi *acpi = ap->private_data; + const struct ata_timing *t; + + if (!(acpi->gtm.flags & 0x10)) + unit = 0; + + /* Now stuff the nS values into the structure */ + t = ata_timing_find_mode(adev->dma_mode); + if (adev->dma_mode >= XFER_UDMA_0) { + acpi->gtm.drive[unit].dma = t->udma; + acpi->gtm.flags |= (1 << (2 * unit)); + } else { + acpi->gtm.drive[unit].dma = t->cycle; + acpi->gtm.flags &= ~(1 << (2 * unit)); + } + ata_acpi_stm(ap, &acpi->gtm); + /* See what mode we actually got */ + ata_acpi_gtm(ap, &acpi->gtm); +} + +/** + * pacpi_qc_issue - command issue + * @qc: command pending + * + * Called when the libata layer is about to issue a command. We wrap + * this interface so that we can load the correct ATA timings if + * necessary. 
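+ *
+ *	(The ACPI timing methods program the channel as a whole, so when
+ *	two devices share a cable the timings must be reloaded whenever
+ *	the active device changes; the acpi->last check below implements
+ *	exactly that.)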
+ */
+
+static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct pata_acpi *acpi = ap->private_data;
+
+	if (acpi->gtm.flags & 0x10)
+		return ata_bmdma_qc_issue(qc);
+
+	if (adev != acpi->last) {
+		pacpi_set_piomode(ap, adev);
+		if (ata_dma_enabled(adev))
+			pacpi_set_dmamode(ap, adev);
+		acpi->last = adev;
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+/**
+ * pacpi_port_start - port setup
+ * @ap: ATA port being set up
+ *
+ * Use the port_start hook to maintain private control structures
+ */
+
+static int pacpi_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct pata_acpi *acpi;
+
+	if (ACPI_HANDLE(&ap->tdev) == NULL)
+		return -ENODEV;
+
+	acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
+	if (ap->private_data == NULL)
+		return -ENOMEM;
+	acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
+	acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
+	return ata_bmdma_port_start(ap);
+}
+
+static struct scsi_host_template pacpi_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pacpi_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= pacpi_qc_issue,
+	.cable_detect		= pacpi_cable_detect,
+	.mode_filter		= pacpi_mode_filter,
+	.set_piomode		= pacpi_set_piomode,
+	.set_dmamode		= pacpi_set_dmamode,
+	.prereset		= pacpi_pre_reset,
+	.port_start		= pacpi_port_start,
+};
+
+
+/**
+ * pacpi_init_one - Register ACPI ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @id: Entry in pacpi_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int pacpi_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+
+		.port_ops	= &pacpi_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(pdev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(pdev);
+	}
+	return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
+}
+
+static const struct pci_device_id pacpi_pci_tbl[] = {
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pacpi_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pacpi_pci_tbl,
+	.probe			= pacpi_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(pacpi_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 87af3b5861a..d19cd88ed2d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -1,7 +1,6 @@
 /*
  * pata_ali.c - ALI 15x3 PATA for new ATA layer
  *		(C) 2005 Red Hat Inc
- *		Alan Cox <alan@redhat.com>
  *
  * based in part upon
  * linux/drivers/ide/pci/alim15x3.c	Version 0.17	2003/01/02
@@ -20,7 +19,9 @@
  *
  *  TODO/CHECK
  *	Cannot have ATAPI on both master & slave for rev < c2 (???) but
- *	otherwise should do atapi DMA.
+ * otherwise should do atapi DMA (For now for old we do PIO only for + * ATAPI) + * Review Sunblade workaround. */ #include <linux/kernel.h> @@ -34,18 +35,31 @@ #include <linux/dmi.h> #define DRV_NAME "pata_ali" -#define DRV_VERSION "0.6.5" +#define DRV_VERSION "0.7.8" + +static int ali_atapi_dma = 0; +module_param_named(atapi_dma, ali_atapi_dma, int, 0644); +MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); + +static struct pci_dev *ali_isa_bridge; /* * Cable special cases */ -static struct dmi_system_id cable_dmi_table[] = { +static const struct dmi_system_id cable_dmi_table[] = { { .ident = "HP Pavilion N5430", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"), + DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), + }, + }, + { + .ident = "Toshiba Satellite S1800-814", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"), }, }, { } @@ -56,6 +70,9 @@ static int ali_cable_override(struct pci_dev *pdev) /* Fujitsu P2000 */ if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF) return 1; + /* Mitac 8317 (Winbook-A) and relatives */ + if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317) + return 1; /* Systems by DMI */ if (dmi_check_system(cable_dmi_table)) return 1; @@ -78,7 +95,7 @@ static int ali_c2_cable_detect(struct ata_port *ap) implement the detect logic */ if (ali_cable_override(pdev)) - return ATA_CBL_PATA80; + return ATA_CBL_PATA40_SHORT; /* Host view cable detect 0x4A bit 0 primary bit 1 secondary Bit set for 40 pin */ @@ -90,59 +107,6 @@ static int ali_c2_cable_detect(struct ata_port *ap) } /** - * ali_early_error_handler - reset for eary chip - * @ap: ATA port - * - * Handle the reset callback for the later chips with cable detect - */ - -static int ali_c2_pre_reset(struct ata_port *ap) -{ - ap->cbl = ali_c2_cable_detect(ap); - return ata_std_prereset(ap); -} - -static void ali_c2_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, ali_c2_pre_reset, - ata_std_softreset, NULL, - ata_std_postreset); -} - -/** - * ali_early_cable_detect - cable detection - * @ap: ATA port - * - * Perform cable detection for older chipsets. This turns out to be - * rather easy to implement - */ - -static int ali_early_cable_detect(struct ata_port *ap) -{ - return ATA_CBL_PATA40; -} - -/** - * ali_early_probe_init - reset for early chip - * @ap: ATA port - * - * Handle the reset callback for the early (pre cable detect) chips. - */ - -static int ali_early_pre_reset(struct ata_port *ap) -{ - ap->cbl = ali_early_cable_detect(ap); - return ata_std_prereset(ap); -} - -static void ali_early_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, ali_early_pre_reset, - ata_std_softreset, NULL, - ata_std_postreset); -} - -/** * ali_20_filter - filter for earlier ALI DMA * @ap: ALi ATA port * @adev: attached device @@ -151,16 +115,16 @@ static void ali_early_error_handler(struct ata_port *ap) * fix that later on. 
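
The 0x4A register semantics quoted in the comment above ("bit 0 primary, bit 1 secondary, bit set for 40 pin") decode as in the sketch below; note that the override path now reports ATA_CBL_PATA40_SHORT, libata's marker for a short 40-wire cable that is still trusted for higher UDMA modes. The register value here is simulated, not read from hardware.

#include <stdio.h>

int main(void)
{
	unsigned char reg4a = 0x02;	/* simulated: bit set means 40-wire */
	int port;

	for (port = 0; port < 2; port++)
		printf("port %d: %s\n", port,
		       (reg4a & (1 << port)) ? "ATA_CBL_PATA40"
					     : "ATA_CBL_PATA80");
	return 0;
}
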
Also ensure we do not do UDMA on WDC drives */ -static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) { - char model_num[40]; + char model_num[ATA_ID_PROD_LEN + 1]; /* No DMA on anything but a disk for now */ if (adev->class != ATA_DEV_ATA) mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); + ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strstr(model_num, "WDC")) return mask &= ~ATA_MASK_UDMA; - return ata_pci_default_filter(ap, adev, mask); + return mask; } /** @@ -187,8 +151,7 @@ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int o pci_read_config_byte(pdev, pio_fifo, &fifo); fifo &= ~(0x0F << shift); - if (on) - fifo |= (on << shift); + fifo |= (on << shift); pci_write_config_byte(pdev, pio_fifo, fifo); } @@ -196,8 +159,7 @@ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int o * ali_program_modes - load mode registers * @ap: ALi channel to load * @adev: Device the timing is for - * @cmd: Command timing - * @data: Data timing + * @t: timing data * @ultra: UDMA timing or zero for off * * Loads the timing registers for cmd/data and disable UDMA if @@ -216,11 +178,11 @@ static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, stru u8 udma; if (t != NULL) { - t->setup = FIT(t->setup, 1, 8) & 7; - t->act8b = FIT(t->act8b, 1, 8) & 7; - t->rec8b = FIT(t->rec8b, 1, 16) & 15; - t->active = FIT(t->active, 1, 8) & 7; - t->recover = FIT(t->recover, 1, 16) & 15; + t->setup = clamp_val(t->setup, 1, 8) & 7; + t->act8b = clamp_val(t->act8b, 1, 8) & 7; + t->rec8b = clamp_val(t->rec8b, 1, 16) & 15; + t->active = clamp_val(t->active, 1, 8) & 7; + t->recover = clamp_val(t->recover, 1, 16) & 15; pci_write_config_byte(pdev, cas, t->setup); pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b); @@ -239,8 +201,7 @@ static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, stru * @ap: ATA interface * @adev: ATA device * - * Program the ALi registers for PIO mode. FIXME: add timings for - * PIO5. + * Program the ALi registers for PIO mode. */ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev) @@ -274,7 +235,7 @@ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev) * @ap: ATA interface * @adev: ATA device * - * FIXME: MWDMA timings + * Program the ALi registers for DMA mode. */ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev) @@ -313,8 +274,28 @@ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev) } /** + * ali_warn_atapi_dma - Warn about ATAPI DMA disablement + * @adev: Device + * + * Whine about ATAPI DMA disablement if @adev is an ATAPI device. + * Can be used as ->dev_config. + */ + +static void ali_warn_atapi_dma(struct ata_device *adev) +{ + struct ata_eh_context *ehc = &adev->link->eh_context; + int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; + + if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) { + ata_dev_warn(adev, + "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n"); + ata_dev_warn(adev, + "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n"); + } +} + +/** * ali_lock_sectors - Keep older devices to 255 sector mode - * @ap: ATA port * @adev: Device * * Called during the bus probe for each device that is found. 
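
The clamp_val() conversions in ali_program_modes() above deserve a worked example: each timing count is clamped to the field's legal range and then masked to the field width, so the maximum legal value wraps to 0 (the masking suggests the controller reads 0 as the full count). A standalone sketch with a local clamp helper standing in for the kernel's clamp_val():

#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	/* 4-bit recover field, legal range 1..16: 16 encodes as 0 */
	printf("recover 16 -> %u\n", clamp_u(16, 1, 16) & 15);
	/* 3-bit active field, legal range 1..8: 9 clamps to 8, encodes as 0 */
	printf("active 9   -> %u\n", clamp_u(9, 1, 8) & 7);
	/* in-range values pass through unchanged */
	printf("active 5   -> %u\n", clamp_u(5, 1, 8) & 7);
	return 0;
}
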
We use
@@ -324,29 +305,58 @@ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  * slower PIO methods
  */
 
-static void ali_lock_sectors(struct ata_port *ap, struct ata_device *adev)
+static void ali_lock_sectors(struct ata_device *adev)
 {
 	adev->max_sectors = 255;
+	ali_warn_atapi_dma(adev);
+}
+
+/**
+ * ali_check_atapi_dma - DMA check for most ALi controllers
+ * @qc: Queued command
+ *
+ * Called to decide whether commands should be sent by DMA or PIO
+ */
+
+static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	if (!ali_atapi_dma) {
+		/* FIXME: pata_ali can't do ATAPI DMA reliably but the
+		 * IDE alim15x3 driver can.  I tried lots of things
+		 * but couldn't find what the actual difference was.
+		 * If you got an idea, please write it to
+		 * linux-ide@vger.kernel.org and cc htejun@gmail.com.
+		 *
+		 * Disable ATAPI DMA for now.
+		 */
+		return -EOPNOTSUPP;
+	}
+
+	/* If it's not a media command, it's not worth it */
+	if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
+{
+	u8 r;
+	int port_bit = 4 << link->ap->port_no;
+
+	/* If our bridge is an ALI 1533 then do the extra work */
+	if (ali_isa_bridge) {
+		/* Tristate and re-enable the bus signals */
+		pci_read_config_byte(ali_isa_bridge, 0x58, &r);
+		r &= ~port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+		r |= port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+	}
+	ata_sff_postreset(link, classes);
 }
 
 static struct scsi_host_template ali_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	/* Keep LBA28 counts so large I/O's don't turn LBA48 and PIO
-	   with older controllers.
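
The read-modify-write dance in ali_c2_c3_postreset() above clears and then sets the per-port enable bit in the ISA bridge's register 0x58, briefly tristating the port's bus signals after reset (per the code's own comments). A standalone sketch with the register access simulated:

#include <stdio.h>

static unsigned char reg58 = 0x0C;	/* simulated ISA bridge register 0x58 */

static void toggle_port_signals(int port_no)
{
	unsigned char r = reg58;
	int port_bit = 4 << port_no;	/* bit 2 = port 0, bit 3 = port 1 */

	r &= ~port_bit;		/* tristate the port's bus signals */
	reg58 = r;
	r |= port_bit;		/* then drive them again */
	reg58 = r;
}

int main(void)
{
	toggle_port_signals(0);
	printf("reg 0x58 = 0x%02x\n", reg58);	/* net state unchanged: 0x0c */
	return 0;
}
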
Not locked so will grow on C5 or later */ - .max_sectors = 255, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; /* @@ -354,30 +364,16 @@ static struct scsi_host_template ali_sht = { */ static struct ata_port_operations ali_early_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, .set_piomode = ali_set_piomode, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ali_early_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + .sff_data_xfer = ata_sff_data_xfer32, +}; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static const struct ata_port_operations ali_dma_base_ops = { + .inherits = &ata_bmdma32_port_ops, + .set_piomode = ali_set_piomode, + .set_dmamode = ali_set_dmamode, }; /* @@ -385,117 +381,104 @@ static struct ata_port_operations ali_early_port_ops = { * detect */ static struct ata_port_operations ali_20_port_ops = { - .port_disable = ata_port_disable, - - .set_piomode = ali_set_piomode, - .set_dmamode = ali_set_dmamode, + .inherits = &ali_dma_base_ops, + .cable_detect = ata_cable_40wire, .mode_filter = ali_20_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + .check_atapi_dma = ali_check_atapi_dma, .dev_config = ali_lock_sectors, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ali_early_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /* * Port operations for DMA capable ALi with cable detect */ static struct ata_port_operations ali_c2_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = ali_set_piomode, - .set_dmamode = ali_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, .dev_config = ali_lock_sectors, + .postreset = ali_c2_c3_postreset, +}; - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ali_c2_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - 
.irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +/* + * Port operations for DMA capable ALi with cable detect + */ +static struct ata_port_operations ali_c4_port_ops = { + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, + .dev_config = ali_lock_sectors, }; /* * Port operations for DMA capable ALi with cable detect and LBA48 */ static struct ata_port_operations ali_c5_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = ali_set_piomode, - .set_dmamode = ali_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ali_c2_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .dev_config = ali_warn_atapi_dma, + .cable_detect = ali_c2_cable_detect, }; + +/** + * ali_init_chipset - chip setup function + * @pdev: PCI device of ATA controller + * + * Perform the setup on the device that must be done both at boot + * and at resume time. + */ + +static void ali_init_chipset(struct pci_dev *pdev) +{ + u8 tmp; + struct pci_dev *north; + + /* + * The chipset revision selects the driver operations and + * mode data. + */ + + if (pdev->revision <= 0x20) { + pci_read_config_byte(pdev, 0x53, &tmp); + tmp |= 0x03; + pci_write_config_byte(pdev, 0x53, tmp); + } else { + pci_read_config_byte(pdev, 0x4a, &tmp); + pci_write_config_byte(pdev, 0x4a, tmp | 0x20); + pci_read_config_byte(pdev, 0x4B, &tmp); + if (pdev->revision < 0xC2) + /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ + /* Clear CD-ROM DMA write bit */ + tmp &= 0x7F; + /* Cable and UDMA */ + if (pdev->revision >= 0xc2) + tmp |= 0x01; + pci_write_config_byte(pdev, 0x4B, tmp | 0x08); + /* + * CD_ROM DMA on (0x53 bit 0). Enable this even if we want + * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control + * via 0x54/55. + */ + pci_read_config_byte(pdev, 0x53, &tmp); + if (pdev->revision >= 0xc7) + tmp |= 0x03; + else + tmp |= 0x01; /* CD_ROM enable for DMA */ + pci_write_config_byte(pdev, 0x53, tmp); + } + north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) { + /* Configure the ALi bridge logic. For non ALi rely on BIOS. 
+ Set the south bridge enable bit */ + pci_read_config_byte(ali_isa_bridge, 0x79, &tmp); + if (pdev->revision == 0xC2) + pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04); + else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) + pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02); + } + pci_dev_put(north); + ata_pci_bmdma_clear_simplex(pdev); +} /** * ali_init_one - discovery callback * @pdev: PCI device ID @@ -507,165 +490,154 @@ static struct ata_port_operations ali_c5_port_ops = { static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static struct ata_port_info info_early = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, + static const struct ata_port_info info_early = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, .port_ops = &ali_early_port_ops }; /* Revision 0x20 added DMA */ - static struct ata_port_info info_20 = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info_20 = { + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0x20 with support logic added UDMA */ - static struct ata_port_info info_20_udma = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, /* UDMA33 */ + static const struct ata_port_info info_20_udma = { + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0xC2 adds UDMA66 */ - static struct ata_port_info info_c2 = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, + static const struct ata_port_info info_c2 = { + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &ali_c2_port_ops }; - /* Revision 0xC3 is UDMA100 */ - static struct ata_port_info info_c3 = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + /* Revision 0xC3 is UDMA66 for now */ + static const struct ata_port_info info_c3 = { + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &ali_c2_port_ops }; - /* Revision 0xC4 is UDMA133 */ - static struct ata_port_info info_c4 = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, - .port_ops = &ali_c2_port_ops + /* Revision 0xC4 is UDMA100 */ + static const struct ata_port_info info_c4 = { + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | + ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + .port_ops = &ali_c4_port_ops }; /* Revision 0xC5 is UDMA133 with LBA48 DMA */ - static struct ata_port_info info_c5 = { - .sht = &ali_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, + static const struct ata_port_info info_c5 = { + .flags = 
ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &ali_c5_port_ops }; - static struct ata_port_info *port_info[2]; - u8 rev, tmp; - struct pci_dev *north, *isa_bridge; + const struct ata_port_info *ppi[] = { NULL, NULL }; + u8 tmp; + int rc; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + rc = pcim_enable_device(pdev); + if (rc) + return rc; /* * The chipset revision selects the driver operations and * mode data. */ - if (rev < 0x20) { - port_info[0] = port_info[1] = &info_early; - } else if (rev < 0xC2) { - /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ - pci_read_config_byte(pdev, 0x4B, &tmp); - /* Clear CD-ROM DMA write bit */ - tmp &= 0x7F; - pci_write_config_byte(pdev, 0x4B, tmp); - port_info[0] = port_info[1] = &info_20; - } else if (rev == 0xC2) { - port_info[0] = port_info[1] = &info_c2; - } else if (rev == 0xC3) { - port_info[0] = port_info[1] = &info_c3; - } else if (rev == 0xC4) { - port_info[0] = port_info[1] = &info_c4; + if (pdev->revision < 0x20) { + ppi[0] = &info_early; + } else if (pdev->revision < 0xC2) { + ppi[0] = &info_20; + } else if (pdev->revision == 0xC2) { + ppi[0] = &info_c2; + } else if (pdev->revision == 0xC3) { + ppi[0] = &info_c3; + } else if (pdev->revision == 0xC4) { + ppi[0] = &info_c4; } else - port_info[0] = port_info[1] = &info_c5; - - if (rev >= 0xC2) { - /* Enable cable detection logic */ - pci_read_config_byte(pdev, 0x4B, &tmp); - pci_write_config_byte(pdev, 0x4B, tmp | 0x08); - } + ppi[0] = &info_c5; - north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0)); - isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + ali_init_chipset(pdev); - if (north && north->vendor == PCI_VENDOR_ID_AL) { - /* Configure the ALi bridge logic. For non ALi rely on BIOS. - Set the south bridge enable bit */ - pci_read_config_byte(isa_bridge, 0x79, &tmp); - if (rev == 0xC2) - pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04); - else if (rev > 0xC2) - pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02); + if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { + /* Are we paired with a UDMA capable chip */ + pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp); + if ((tmp & 0x1E) == 0x12) + ppi[0] = &info_20_udma; } - if (rev >= 0x20) { - if (rev < 0xC2) { - /* Are we paired with a UDMA capable chip */ - pci_read_config_byte(isa_bridge, 0x5E, &tmp); - if ((tmp & 0x1E) == 0x12) - port_info[0] = port_info[1] = &info_20_udma; - } - /* - * CD_ROM DMA on (0x53 bit 0). Enable this even if we want - * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control - * via 0x54/55. 
- */ - pci_read_config_byte(pdev, 0x53, &tmp); - if (rev <= 0x20) - tmp &= ~0x02; - if (rev == 0xc7) - tmp |= 0x03; - else - tmp |= 0x01; /* CD_ROM enable for DMA */ - pci_write_config_byte(pdev, 0x53, tmp); - } - - pci_dev_put(isa_bridge); - pci_dev_put(north); + if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) + return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + else + return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); +} - ata_pci_clear_simplex(pdev); - return ata_pci_init_one(pdev, port_info, 2); +#ifdef CONFIG_PM_SLEEP +static int ali_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + ali_init_chipset(pdev); + ata_host_resume(host); + return 0; } +#endif -static struct pci_device_id ali[] = { - { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5228), }, - { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229), }, - { 0, }, +static const struct pci_device_id ali[] = { + { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), }, + { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), }, + + { }, }; static struct pci_driver ali_pci_driver = { .name = DRV_NAME, .id_table = ali, .probe = ali_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ali_reinit_one, +#endif }; static int __init ali_init(void) { - return pci_register_driver(&ali_pci_driver); + int ret; + ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + + ret = pci_register_driver(&ali_pci_driver); + if (ret < 0) + pci_dev_put(ali_isa_bridge); + return ret; } static void __exit ali_exit(void) { pci_unregister_driver(&ali_pci_driver); + pci_dev_put(ali_isa_bridge); } diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 599ee266722..8d4d959a821 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -1,7 +1,6 @@ /* * pata_amd.c - AMD PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox <alan@redhat.com> * * Based on pata-sil680. Errata information is taken from data sheets * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are @@ -12,20 +11,19 @@ * Power management on ports * * - * Documentation publically available. + * Documentation publicly available. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_amd" -#define DRV_VERSION "0.2.4" +#define DRV_VERSION "0.4.1" /** * timing_setup - shared timing computation and load @@ -56,10 +54,12 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse u8 t; T = 1000000000 / amd_clock; - UT = T / min_t(int, max_t(int, clock, 1), 2); + UT = T; + if (clock >= 2) + UT = T / 2; if (ata_timing_compute(adev, speed, &at, T, UT) < 0) { - dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed); + dev_err(&pdev->dev, "unknown mode %d\n", speed); return; } @@ -82,32 +82,32 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse /* Configure the address set up timing */ pci_read_config_byte(pdev, offset + 0x0C, &t); - t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1)); + t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1)); pci_write_config_byte(pdev, offset + 0x0C , t); /* Configure the 8bit I/O timing */ pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)), - ((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1)); + ((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1)); /* Drive timing */ pci_write_config_byte(pdev, offset + 0x08 + (3 - dn), - ((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1)); + ((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1)); switch (clock) { case 1: - t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03; + t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03; break; case 2: - t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03; + t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03; break; case 3: - t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03; + t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03; break; case 4: - t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03; + t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03; break; default: @@ -115,69 +115,86 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse } /* UDMA timing */ - pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t); + if (at.udma) + pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t); } /** - * amd_probe_init - cable detection - * @ap: ATA port + * amd_pre_reset - perform reset handling + * @link: ATA link + * @deadline: deadline jiffies for the operation * - * Perform cable detection. The BIOS stores this in PCI config - * space for us. + * Reset sequence checking enable bits to see which ports are + * active. 
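
A worked example of the cycle-time math at the top of timing_setup() above: T is the command timing step derived from the base clock, and UT the UDMA step, which is halved when the UDMA clock selector is 2 or more (a double-rate UDMA clock). The 33 MHz figure and Hz units below are only assumptions for this sketch; the driver's actual amd_clock definition is outside this excerpt.

#include <stdio.h>

int main(void)
{
	long amd_clock = 33333333;		/* assumed 33 MHz base clock, Hz */
	long T = 1000000000 / amd_clock;	/* command step: ~30 ns */
	long UT;
	int clock;				/* UDMA clock selector */

	for (clock = 1; clock <= 4; clock++) {
		UT = T;
		if (clock >= 2)
			UT = T / 2;		/* ~15 ns with double-rate clock */
		printf("clock=%d: T=%ld UT=%ld\n", clock, T, UT);
	}
	return 0;
}
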
*/ -static int amd_pre_reset(struct ata_port *ap) +static int amd_pre_reset(struct ata_link *link, unsigned long deadline) { - static const u32 bitmask[2] = {0x03, 0xC0}; static const struct pci_bits amd_enable_bits[] = { { 0x40, 1, 0x02, 0x02 }, { 0x40, 1, 0x01, 0x01 } }; + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 ata66; if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) return -ENOENT; - pci_read_config_byte(pdev, 0x42, &ata66); - if (ata66 & bitmask[ap->port_no]) - ap->cbl = ATA_CBL_PATA80; - else - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); - + return ata_sff_prereset(link, deadline); } -static void amd_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, amd_pre_reset, - ata_std_softreset, NULL, - ata_std_postreset); -} +/** + * amd_cable_detect - report cable type + * @ap: port + * + * AMD controller/BIOS setups record the cable type in word 0x42 + */ -static int amd_early_pre_reset(struct ata_port *ap) +static int amd_cable_detect(struct ata_port *ap) { + static const u32 bitmask[2] = {0x03, 0x0C}; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - static struct pci_bits amd_enable_bits[] = { - { 0x40, 1, 0x02, 0x02 }, - { 0x40, 1, 0x01, 0x01 } - }; - - if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) - return -ENOENT; - - /* No host side cable detection */ - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); + u8 ata66; + pci_read_config_byte(pdev, 0x42, &ata66); + if (ata66 & bitmask[ap->port_no]) + return ATA_CBL_PATA80; + return ATA_CBL_PATA40; } -static void amd_early_error_handler(struct ata_port *ap) +/** + * amd_fifo_setup - set the PIO FIFO for ATA/ATAPI + * @ap: ATA interface + * @adev: ATA device + * + * Set the PCI fifo for this device according to the devices present + * on the bus at this point in time. 
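
amd_cable_detect() above reads PCI config byte 0x42 and tests one bit group per port, using the driver's {0x03, 0x0C} table; any set bit in a port's group means the BIOS recorded an 80-wire cable there. A standalone decode with a simulated register value:

#include <stdio.h>

int main(void)
{
	static const unsigned int bitmask[2] = { 0x03, 0x0C };	/* per-port bits */
	unsigned char ata66 = 0x04;	/* simulated config byte 0x42 */
	int port;

	for (port = 0; port < 2; port++)
		printf("port %d: %s\n", port,
		       (ata66 & bitmask[port]) ? "ATA_CBL_PATA80"
					       : "ATA_CBL_PATA40");
	return 0;
}
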
We need to turn the post write buffer + * off for ATAPI devices as we may need to issue a word sized write to the + * device as the final I/O + */ + +static void amd_fifo_setup(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, amd_early_pre_reset, - ata_std_softreset, NULL, - ata_std_postreset); + struct ata_device *adev; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + static const u8 fifobit[2] = { 0xC0, 0x30}; + u8 fifo = fifobit[ap->port_no]; + u8 r; + + + ata_for_each_dev(adev, &ap->link, ENABLED) { + if (adev->class == ATA_DEV_ATAPI) + fifo = 0; + } + if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */ + fifo = 0; + + /* On the later chips the read prefetch bits become no-op bits */ + pci_read_config_byte(pdev, 0x41, &r); + r &= ~fifobit[ap->port_no]; + r |= fifo; + pci_write_config_byte(pdev, 0x41, r); } /** @@ -190,21 +207,25 @@ static void amd_early_error_handler(struct ata_port *ap) static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev) { + amd_fifo_setup(ap); timing_setup(ap, adev, 0x40, adev->pio_mode, 1); } static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev) { + amd_fifo_setup(ap); timing_setup(ap, adev, 0x40, adev->pio_mode, 2); } static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev) { + amd_fifo_setup(ap); timing_setup(ap, adev, 0x40, adev->pio_mode, 3); } static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev) { + amd_fifo_setup(ap); timing_setup(ap, adev, 0x40, adev->pio_mode, 4); } @@ -237,50 +258,90 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev) timing_setup(ap, adev, 0x40, adev->dma_mode, 4); } +/* Both host-side and drive-side detection results are worthless on NV + * PATAs. Ignore them and just follow what BIOS configured. Both the + * current configuration in PCI config reg and ACPI GTM result are + * cached during driver attach and are consulted to select transfer + * mode. + */ +static unsigned long nv_mode_filter(struct ata_device *dev, + unsigned long xfer_mask) +{ + static const unsigned int udma_mask_map[] = + { ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0, + ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 }; + struct ata_port *ap = dev->link->ap; + char acpi_str[32] = ""; + u32 saved_udma, udma; + const struct ata_acpi_gtm *gtm; + unsigned long bios_limit = 0, acpi_limit = 0, limit; + + /* find out what BIOS configured */ + udma = saved_udma = (unsigned long)ap->host->private_data; + + if (ap->port_no == 0) + udma >>= 16; + if (dev->devno == 0) + udma >>= 8; + + if ((udma & 0xc0) == 0xc0) + bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]); + + /* consult ACPI GTM too */ + gtm = ata_acpi_init_gtm(ap); + if (gtm) { + acpi_limit = ata_acpi_gtm_xfermask(dev, gtm); + + snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)", + gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags); + } + + /* be optimistic, EH can take care of things if something goes wrong */ + limit = bios_limit | acpi_limit; + + /* If PIO or DMA isn't configured at all, don't limit. Let EH + * handle it. 
+	 */
+	if (!(limit & ATA_MASK_PIO))
+		limit |= ATA_MASK_PIO;
+	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
+		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
+	/* PIO4, MWDMA2, UDMA2 should always be supported regardless of
+	   cable detection result */
+	limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
+
+	ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
+		     "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
+		     xfer_mask, limit, xfer_mask & limit, bios_limit,
+		     saved_udma, acpi_limit, acpi_str);
+
+	return xfer_mask & limit;
+}
 
 /**
  * nv_probe_init - cable detection
- * @ap: ATA port
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
  *
  * Perform cable detection. The BIOS stores this in PCI config
  * space for us.
  */
 
-static int nv_pre_reset(struct ata_port *ap) {
-	static const u8 bitmask[2] = {0x03, 0xC0};
+static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
+{
 	static const struct pci_bits nv_enable_bits[] = {
 		{ 0x50, 1, 0x02, 0x02 },
 		{ 0x50, 1, 0x01, 0x01 }
 	};
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u8 ata66;
-	u16 udma;
 
 	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	pci_read_config_byte(pdev, 0x52, &ata66);
-	if (ata66 & bitmask[ap->port_no])
-		ap->cbl = ATA_CBL_PATA80;
-	else
-		ap->cbl = ATA_CBL_PATA40;
-
-	/* We now have to double check because the Nvidia boxes BIOS
-	   doesn't always set the cable bits but does set mode bits */
-
-	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
-	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
-		ap->cbl = ATA_CBL_PATA80;
-	return ata_std_prereset(ap);
+	return ata_sff_prereset(link, deadline);
 }
 
-static void nv_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, nv_pre_reset,
-			   ata_std_softreset, NULL,
-			   ata_std_postreset);
-}
 
 /**
  * nv100_set_piomode - set initial PIO mode data
  * @ap: ATA interface
@@ -318,392 +379,262 @@ static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
 }
 
+static void nv_host_stop(struct ata_host *host)
+{
+	u32 udma = (unsigned long)host->private_data;
+
+	/* restore PCI config register 0x60 */
+	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
+}
+
 static struct scsi_host_template amd_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations amd_base_port_ops = {
+	.inherits	= &ata_bmdma32_port_ops,
+	.prereset	= amd_pre_reset,
 };
 
 static struct ata_port_operations amd33_port_ops = {
-	.port_disable	= ata_port_disable,
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= amd33_set_piomode,
 	.set_dmamode	= amd33_set_dmamode,
-	.mode_filter	= ata_pci_default_filter,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select	= ata_std_dev_select,
-
-	.freeze		= ata_bmdma_freeze,
-	.thaw		= ata_bmdma_thaw,
-	.error_handler	= amd_early_error_handler,
-	.post_internal_cmd = ata_bmdma_post_internal_cmd,
-
-	.bmdma_setup	= ata_bmdma_setup,
-	.bmdma_start
= ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations amd66_port_ops = { - .port_disable = ata_port_disable, + .inherits = &amd_base_port_ops, + .cable_detect = ata_cable_unknown, .set_piomode = amd66_set_piomode, .set_dmamode = amd66_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = amd_early_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations amd100_port_ops = { - .port_disable = ata_port_disable, + .inherits = &amd_base_port_ops, + .cable_detect = ata_cable_unknown, .set_piomode = amd100_set_piomode, .set_dmamode = amd100_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = amd_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations amd133_port_ops = { - .port_disable = ata_port_disable, + .inherits = &amd_base_port_ops, + .cable_detect = amd_cable_detect, .set_piomode = amd133_set_piomode, .set_dmamode = amd133_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = amd_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +}; + +static const struct ata_port_operations nv_base_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_ignore, + .mode_filter = nv_mode_filter, + .prereset = nv_pre_reset, + .host_stop = nv_host_stop, }; static struct 
ata_port_operations nv100_port_ops = { - .port_disable = ata_port_disable, + .inherits = &nv_base_port_ops, .set_piomode = nv100_set_piomode, .set_dmamode = nv100_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = nv_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations nv133_port_ops = { - .port_disable = ata_port_disable, + .inherits = &nv_base_port_ops, .set_piomode = nv133_set_piomode, .set_dmamode = nv133_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = nv_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; +static void amd_clear_fifo(struct pci_dev *pdev) +{ + u8 fifo; + /* Disable the FIFO, the FIFO logic will re-enable it as + appropriate */ + pci_read_config_byte(pdev, 0x41, &fifo); + fifo &= 0x0F; + pci_write_config_byte(pdev, 0x41, fifo); +} + static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static struct ata_port_info info[10] = { - { /* 0: AMD 7401 */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, /* No SWDMA */ - .udma_mask = 0x07, /* UDMA 33 */ + static const struct ata_port_info info[10] = { + { /* 0: AMD 7401 - no swdma */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &amd33_port_ops }, { /* 1: Early AMD7409 - no swdma */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, /* UDMA 66 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &amd66_port_ops }, - { /* 2: AMD 7409, no swdma errata */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, /* UDMA 66 */ + { /* 2: AMD 7409 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &amd66_port_ops }, { /* 3: AMD 7411 */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, /* UDMA 100 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, 
+ .udma_mask = ATA_UDMA5, .port_ops = &amd100_port_ops }, { /* 4: AMD 7441 */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, /* UDMA 100 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &amd100_port_ops }, - { /* 5: AMD 8111*/ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, /* UDMA 133, no swdma */ + { /* 5: AMD 8111 - no swdma */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &amd133_port_ops }, - { /* 6: AMD 8111 UDMA 100 (Serenade) */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, /* UDMA 100, no swdma */ + { /* 6: AMD 8111 UDMA 100 (Serenade) - no swdma */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &amd133_port_ops }, { /* 7: Nvidia Nforce */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, /* UDMA 100 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &nv100_port_ops }, - { /* 8: Nvidia Nforce2 and later */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, /* UDMA 133, no swdma */ + { /* 8: Nvidia Nforce2 and later - no swdma */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &nv133_port_ops }, { /* 9: AMD CS5536 (Geode companion) */ - .sht = &amd_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, /* UDMA 100 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &amd100_port_ops } }; - static struct ata_port_info *port_info[2]; - static int printed_version; + const struct ata_port_info *ppi[] = { NULL, NULL }; int type = id->driver_data; - u8 rev; + void *hpriv = NULL; u8 fifo; + int rc; + + ata_print_version_once(&pdev->dev, DRV_VERSION); - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + rc = pcim_enable_device(pdev); + if (rc) + return rc; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); pci_read_config_byte(pdev, 0x41, &fifo); /* Check for AMD7409 without swdma errata and if found adjust type */ - if (type == 1 && rev > 0x7) + if (type == 1 && pdev->revision > 0x7) type = 2; - /* Check for AMD7411 */ - if (type == 3) - /* FIFO is broken */ - pci_write_config_byte(pdev, 0x41, fifo & 0x0F); - else - pci_write_config_byte(pdev, 0x41, fifo | 0xF0); - /* Serenade ? */ if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD && pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE) type = 6; /* UDMA 100 only */ + /* + * Okay, type is determined now. Apply type-specific workarounds. + */ + ppi[0] = &info[type]; + if (type < 3) - ata_pci_clear_simplex(pdev); + ata_pci_bmdma_clear_simplex(pdev); + if (pdev->vendor == PCI_VENDOR_ID_AMD) + amd_clear_fifo(pdev); + /* Cable detection on Nvidia chips doesn't work too well, + * cache BIOS programmed UDMA mode. 
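
The hpriv idiom used just below (and unwound in nv_mode_filter() and nv_host_stop() above) stows the 32-bit snapshot of config dword 0x60 directly in the host's private_data pointer by casting through unsigned long. A standalone sketch of the round trip, with a made-up register value:

#include <stdio.h>

int main(void)
{
	unsigned int udma = 0xC5C20042;			/* pretend snapshot */
	void *hpriv = (void *)(unsigned long)udma;	/* stash in a pointer */
	unsigned int back = (unsigned int)(unsigned long)hpriv;

	printf("stored 0x%x, recovered 0x%x\n", udma, back);
	return 0;
}
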
+ */ + if (type == 7 || type == 8) { + u32 udma; + + pci_read_config_dword(pdev, 0x60, &udma); + hpriv = (void *)(unsigned long)udma; + } /* And fire it up */ + return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0); +} - port_info[0] = port_info[1] = &info[type]; - return ata_pci_init_one(pdev, port_info, 2); +#ifdef CONFIG_PM_SLEEP +static int amd_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + if (pdev->vendor == PCI_VENDOR_ID_AMD) { + amd_clear_fifo(pdev); + if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 || + pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401) + ata_pci_bmdma_clear_simplex(pdev); + } + ata_host_resume(host); + return 0; } +#endif static const struct pci_device_id amd[] = { - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_COBRA_7401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, - { 0, }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 }, 
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 }, + + { }, }; static struct pci_driver amd_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = amd, .probe = amd_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = amd_reinit_one, +#endif }; -static int __init amd_init(void) -{ - return pci_register_driver(&amd_pci_driver); -} - -static void __exit amd_exit(void) -{ - pci_unregister_driver(&amd_pci_driver); -} - +module_pci_driver(amd_pci_driver); MODULE_AUTHOR("Alan Cox"); -MODULE_DESCRIPTION("low-level driver for AMD PATA IDE"); +MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, amd); MODULE_VERSION(DRV_VERSION); - -module_init(amd_init); -module_exit(amd_exit); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c new file mode 100644 index 00000000000..4edb1a81f63 --- /dev/null +++ b/drivers/ata/pata_arasan_cf.c @@ -0,0 +1,976 @@ +/* + * drivers/ata/pata_arasan_cf.c + * + * Arasan Compact Flash host controller source file + * + * Copyright (C) 2011 ST Microelectronics + * Viresh Kumar <viresh.linux@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +/* + * The Arasan CompactFlash Device Controller IP core has three basic modes of + * operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card + * ATA using true IDE modes. This driver supports only True IDE mode currently. + * + * Arasan CF Controller shares global irq register with Arasan XD Controller. + * + * Tested on arch/arm/mach-spear13xx + */ + +#include <linux/ata.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/libata.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pata_arasan_cf_data.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#define DRIVER_NAME "arasan_cf" +#define TIMEOUT msecs_to_jiffies(3000) + +/* Registers */ +/* CompactFlash Interface Status */ +#define CFI_STS 0x000 + #define STS_CHG (1) + #define BIN_AUDIO_OUT (1 << 1) + #define CARD_DETECT1 (1 << 2) + #define CARD_DETECT2 (1 << 3) + #define INP_ACK (1 << 4) + #define CARD_READY (1 << 5) + #define IO_READY (1 << 6) + #define B16_IO_PORT_SEL (1 << 7) +/* IRQ */ +#define IRQ_STS 0x004 +/* Interrupt Enable */ +#define IRQ_EN 0x008 + #define CARD_DETECT_IRQ (1) + #define STATUS_CHNG_IRQ (1 << 1) + #define MEM_MODE_IRQ (1 << 2) + #define IO_MODE_IRQ (1 << 3) + #define TRUE_IDE_MODE_IRQ (1 << 8) + #define PIO_XFER_ERR_IRQ (1 << 9) + #define BUF_AVAIL_IRQ (1 << 10) + #define XFER_DONE_IRQ (1 << 11) + #define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\ + TRUE_IDE_MODE_IRQ) + #define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\ + BUF_AVAIL_IRQ | XFER_DONE_IRQ) +/* Operation Mode */ +#define OP_MODE 0x00C + #define CARD_MODE_MASK (0x3) + #define MEM_MODE (0x0) + #define IO_MODE (0x1) + #define TRUE_IDE_MODE (0x2) + + #define CARD_TYPE_MASK (1 << 2) + #define CF_CARD (0) + #define CF_PLUS_CARD (1 << 2) + + #define CARD_RESET (1 << 3) + #define CFHOST_ENB (1 << 4) + #define 
OUTPUTS_TRISTATE (1 << 5) + #define ULTRA_DMA_ENB (1 << 8) + #define MULTI_WORD_DMA_ENB (1 << 9) + #define DRQ_BLOCK_SIZE_MASK (0x3 << 11) + #define DRQ_BLOCK_SIZE_512 (0) + #define DRQ_BLOCK_SIZE_1024 (1 << 11) + #define DRQ_BLOCK_SIZE_2048 (2 << 11) + #define DRQ_BLOCK_SIZE_4096 (3 << 11) +/* CF Interface Clock Configuration */ +#define CLK_CFG 0x010 + #define CF_IF_CLK_MASK (0XF) +/* CF Timing Mode Configuration */ +#define TM_CFG 0x014 + #define MEM_MODE_TIMING_MASK (0x3) + #define MEM_MODE_TIMING_250NS (0x0) + #define MEM_MODE_TIMING_120NS (0x1) + #define MEM_MODE_TIMING_100NS (0x2) + #define MEM_MODE_TIMING_80NS (0x3) + + #define IO_MODE_TIMING_MASK (0x3 << 2) + #define IO_MODE_TIMING_250NS (0x0 << 2) + #define IO_MODE_TIMING_120NS (0x1 << 2) + #define IO_MODE_TIMING_100NS (0x2 << 2) + #define IO_MODE_TIMING_80NS (0x3 << 2) + + #define TRUEIDE_PIO_TIMING_MASK (0x7 << 4) + #define TRUEIDE_PIO_TIMING_SHIFT 4 + + #define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7) + #define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7 + + #define ULTRA_DMA_TIMING_MASK (0x7 << 10) + #define ULTRA_DMA_TIMING_SHIFT 10 +/* CF Transfer Address */ +#define XFER_ADDR 0x014 + #define XFER_ADDR_MASK (0x7FF) + #define MAX_XFER_COUNT 0x20000u +/* Transfer Control */ +#define XFER_CTR 0x01C + #define XFER_COUNT_MASK (0x3FFFF) + #define ADDR_INC_DISABLE (1 << 24) + #define XFER_WIDTH_MASK (1 << 25) + #define XFER_WIDTH_8B (0) + #define XFER_WIDTH_16B (1 << 25) + + #define MEM_TYPE_MASK (1 << 26) + #define MEM_TYPE_COMMON (0) + #define MEM_TYPE_ATTRIBUTE (1 << 26) + + #define MEM_IO_XFER_MASK (1 << 27) + #define MEM_XFER (0) + #define IO_XFER (1 << 27) + + #define DMA_XFER_MODE (1 << 28) + + #define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29)) + #define XFER_DIR_MASK (1 << 30) + #define XFER_READ (0) + #define XFER_WRITE (1 << 30) + + #define XFER_START (1 << 31) +/* Write Data Port */ +#define WRITE_PORT 0x024 +/* Read Data Port */ +#define READ_PORT 0x028 +/* ATA Data Port */ +#define ATA_DATA_PORT 0x030 + #define ATA_DATA_PORT_MASK (0xFFFF) +/* ATA Error/Features */ +#define ATA_ERR_FTR 0x034 +/* ATA Sector Count */ +#define ATA_SC 0x038 +/* ATA Sector Number */ +#define ATA_SN 0x03C +/* ATA Cylinder Low */ +#define ATA_CL 0x040 +/* ATA Cylinder High */ +#define ATA_CH 0x044 +/* ATA Select Card/Head */ +#define ATA_SH 0x048 +/* ATA Status-Command */ +#define ATA_STS_CMD 0x04C +/* ATA Alternate Status/Device Control */ +#define ATA_ASTS_DCTR 0x050 +/* Extended Write Data Port 0x200-0x3FC */ +#define EXT_WRITE_PORT 0x200 +/* Extended Read Data Port 0x400-0x5FC */ +#define EXT_READ_PORT 0x400 + #define FIFO_SIZE 0x200u +/* Global Interrupt Status */ +#define GIRQ_STS 0x800 +/* Global Interrupt Status enable */ +#define GIRQ_STS_EN 0x804 +/* Global Interrupt Signal enable */ +#define GIRQ_SGN_EN 0x808 + #define GIRQ_CF (1) + #define GIRQ_XD (1 << 1) + +/* Compact Flash Controller Dev Structure */ +struct arasan_cf_dev { + /* pointer to ata_host structure */ + struct ata_host *host; + /* clk structure */ + struct clk *clk; + + /* physical base address of controller */ + dma_addr_t pbase; + /* virtual base address of controller */ + void __iomem *vbase; + /* irq number*/ + int irq; + + /* status to be updated to framework regarding DMA transfer */ + u8 dma_status; + /* Card is present or Not */ + u8 card_present; + + /* dma specific */ + /* Completion for transfer complete interrupt from controller */ + struct completion cf_completion; + /* Completion for DMA transfer complete. 
*/ + struct completion dma_completion; + /* Dma channel allocated */ + struct dma_chan *dma_chan; + /* Mask for DMA transfers */ + dma_cap_mask_t mask; + /* DMA transfer work */ + struct work_struct work; + /* DMA delayed finish work */ + struct delayed_work dwork; + /* qc to be transferred using DMA */ + struct ata_queued_cmd *qc; +}; + +static struct scsi_host_template arasan_cf_sht = { + ATA_BASE_SHT(DRIVER_NAME), + .sg_tablesize = SG_NONE, + .dma_boundary = 0xFFFFFFFFUL, +}; + +static void cf_dumpregs(struct arasan_cf_dev *acdev) +{ + struct device *dev = acdev->host->dev; + + dev_dbg(dev, ": =========== REGISTER DUMP ==========="); + dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS)); + dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS)); + dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN)); + dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE)); + dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG)); + dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG)); + dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR)); + dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS)); + dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN)); + dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN)); + dev_dbg(dev, ": ====================================="); +} + +/* Enable/Disable global interrupts shared between CF and XD ctrlr. */ +static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable) +{ + /* enable should be 0 or 1 */ + writel(enable, acdev->vbase + GIRQ_STS_EN); + writel(enable, acdev->vbase + GIRQ_SGN_EN); +} + +/* Enable/Disable CF interrupts */ +static inline void +cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable) +{ + u32 val = readl(acdev->vbase + IRQ_EN); + /* clear & enable/disable irqs */ + if (enable) { + writel(mask, acdev->vbase + IRQ_STS); + writel(val | mask, acdev->vbase + IRQ_EN); + } else + writel(val & ~mask, acdev->vbase + IRQ_EN); +} + +static inline void cf_card_reset(struct arasan_cf_dev *acdev) +{ + u32 val = readl(acdev->vbase + OP_MODE); + + writel(val | CARD_RESET, acdev->vbase + OP_MODE); + udelay(200); + writel(val & ~CARD_RESET, acdev->vbase + OP_MODE); +} + +static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev) +{ + writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB, + acdev->vbase + OP_MODE); + writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB, + acdev->vbase + OP_MODE); +} + +static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged) +{ + struct ata_port *ap = acdev->host->ports[0]; + struct ata_eh_info *ehi = &ap->link.eh_info; + u32 val = readl(acdev->vbase + CFI_STS); + + /* Both CD1 & CD2 should be low if card inserted completely */ + if (!(val & (CARD_DETECT1 | CARD_DETECT2))) { + if (acdev->card_present) + return; + acdev->card_present = 1; + cf_card_reset(acdev); + } else { + if (!acdev->card_present) + return; + acdev->card_present = 0; + } + + if (hotplugged) { + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); + } +} + +static int cf_init(struct arasan_cf_dev *acdev) +{ + struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev); + unsigned int if_clk; + unsigned long flags; + int ret = 0; + + ret = clk_prepare_enable(acdev->clk); + if (ret) { + dev_dbg(acdev->host->dev, "clock enable failed"); + return ret; + } + + ret = clk_set_rate(acdev->clk, 166000000); + if (ret) { + dev_warn(acdev->host->dev, "clock set rate failed"); + clk_disable_unprepare(acdev->clk); + return ret; + } + + 
spin_lock_irqsave(&acdev->host->lock, flags);
+	/* configure CF interface clock */
+	/* TODO: read from device tree */
+	if_clk = CF_IF_CLK_166M;
+	if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
+		if_clk = pdata->cf_if_clk;
+
+	writel(if_clk, acdev->vbase + CLK_CFG);
+
+	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
+	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
+	cf_ginterrupt_enable(acdev, 1);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	return ret;
+}
+
+static void cf_exit(struct arasan_cf_dev *acdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	cf_ginterrupt_enable(acdev, 0);
+	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
+	cf_card_reset(acdev);
+	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
+	       acdev->vbase + OP_MODE);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+	clk_disable_unprepare(acdev->clk);
+}
+
+static void dma_callback(void *dev)
+{
+	struct arasan_cf_dev *acdev = dev;
+
+	complete(&acdev->dma_completion);
+}
+
+static inline void dma_complete(struct arasan_cf_dev *acdev)
+{
+	struct ata_queued_cmd *qc = acdev->qc;
+	unsigned long flags;
+
+	acdev->qc = NULL;
+	ata_sff_interrupt(acdev->irq, acdev->host);
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static inline int wait4buf(struct arasan_cf_dev *acdev)
+{
+	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
+		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+
+		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
+		return -ETIMEDOUT;
+	}
+
+	/* Check if PIO Error interrupt has occurred */
+	if (acdev->dma_status & ATA_DMA_ERR)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
+{
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *chan = acdev->dma_chan;
+	dma_cookie_t cookie;
+	unsigned long flags = DMA_PREP_INTERRUPT;
+	int ret = 0;
+
+	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	if (!tx) {
+		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
+		return -EAGAIN;
+	}
+
+	tx->callback = dma_callback;
+	tx->callback_param = acdev;
+	cookie = tx->tx_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(acdev->host->dev, "dma_submit_error\n");
+		return ret;
+	}
+
+	chan->device->device_issue_pending(chan);
+
+	/* Wait for DMA to complete */
+	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
+static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
+{
+	dma_addr_t dest = 0, src = 0;
+	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
+	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+	unsigned long flags;
+	int ret = 0;
+
+	sglen = sg_dma_len(sg);
+	if (write) {
+		src = sg_dma_address(sg);
+		dest = acdev->pbase + EXT_WRITE_PORT;
+	} else {
+		dest = sg_dma_address(sg);
+		src = acdev->pbase + EXT_READ_PORT;
+	}
+
+	/*
+	 * For each sg: up to MAX_XFER_COUNT bytes are transferred before the
+	 * transfer-complete interrupt is raised. In between, a buffer-
+	 * available interrupt is raised after every FIFO_SIZE bytes, and at
+	 * that point the FIFO is refilled with at most FIFO_SIZE bytes.
+	 */
+	while (sglen) {
+		xfer_cnt = min(sglen, MAX_XFER_COUNT);
+		spin_lock_irqsave(&acdev->host->lock, flags);
+		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
+			~XFER_COUNT_MASK;
+		writel(xfer_ctr | xfer_cnt | XFER_START,
+		       acdev->vbase + XFER_CTR);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+		/* continue dma xfers until current sg is completed */
+		while (xfer_cnt) {
+			/* wait for read to complete */
+			if (!write) {
+				ret = wait4buf(acdev);
+				if (ret)
+					goto fail;
+			}
+
+			/* read/write FIFO in chunk of FIFO_SIZE */
+			dma_len = min(xfer_cnt, FIFO_SIZE);
+			ret = dma_xfer(acdev, src, dest, dma_len);
+			if (ret) {
+				dev_err(acdev->host->dev, "dma failed");
+				goto fail;
+			}
+
+			if (write)
+				src += dma_len;
+			else
+				dest += dma_len;
+
+			sglen -= dma_len;
+			xfer_cnt -= dma_len;
+
+			/* wait for write to complete */
+			if (write) {
+				ret = wait4buf(acdev);
+				if (ret)
+					goto fail;
+			}
+		}
+	}
+
+fail:
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+	       acdev->vbase + XFER_CTR);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	return ret;
+}
+
+/*
+ * This routine uses an external DMA controller to read/write data to/from
+ * the FIFO of the CF controller. The CF controller provides two transfer-
+ * related interrupts:
+ * - buf_avail: generated as soon as a 512-byte buffer is available for
+ *   reading, or an empty buffer is available for writing.
+ * - xfer_done: generated once "xfer_size" bytes have been moved to/from the
+ *   FIFO. xfer_size is programmed in the XFER_CTR register.
+ *
+ * Max buffer size = FIFO_SIZE = 512 Bytes.
+ * Max xfer_size = MAX_XFER_COUNT = 256 KB.
+ */
+static void data_xfer(struct work_struct *work)
+{
+	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+			work);
+	struct ata_queued_cmd *qc = acdev->qc;
+	struct scatterlist *sg;
+	unsigned long flags;
+	u32 temp;
+	int ret = 0;
+
+	/* request dma channels */
+	/* dma_request_channel may sleep, so calling from process context */
+	acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
+	if (!acdev->dma_chan) {
+		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+		goto chan_request_fail;
+	}
+
+	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
+		ret = sg_xfer(acdev, sg);
+		if (ret)
+			break;
+	}
+
+	dma_release_channel(acdev->dma_chan);
+
+	/* data xferred successfully */
+	if (!ret) {
+		u32 status;
+
+		spin_lock_irqsave(&acdev->host->lock, flags);
+		status = ioread8(qc->ap->ioaddr.altstatus_addr);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+		if (status & (ATA_BUSY | ATA_DRQ)) {
+			ata_sff_queue_delayed_work(&acdev->dwork, 1);
+			return;
+		}
+
+		goto sff_intr;
+	}
+
+	cf_dumpregs(acdev);
+
+chan_request_fail:
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	/* error when transferring data to/from memory */
+	qc->err_mask |= AC_ERR_HOST_BUS;
+	qc->ap->hsm_task_state = HSM_ST_ERR;
+
+	cf_ctrl_reset(acdev);
+	spin_unlock_irqrestore(qc->ap->lock, flags);
+sff_intr:
+	dma_complete(acdev);
+}
+
+static void delayed_finish(struct work_struct *work)
+{
+	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+			dwork.work);
+	struct ata_queued_cmd *qc = acdev->qc;
+	unsigned long flags;
+	u8 status;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	status = ioread8(qc->ap->ioaddr.altstatus_addr);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	if (status & (ATA_BUSY | ATA_DRQ))
+		ata_sff_queue_delayed_work(&acdev->dwork, 1);
+	else
+		dma_complete(acdev);
+}
+
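The worker above leans on the dmaengine memcpy API. For reference, a minimal, self-contained sketch of the same request/prep/submit/wait pattern that dma_xfer() follows; it is illustrative only, example_memcpy() and example_done() are hypothetical names, and device_control(DMA_TERMINATE_ALL) is the legacy termination interface of this kernel generation, as used by the driver itself:

static void example_done(void *arg)
{
	/* dmaengine callbacks take a void *; here it is a completion */
	complete(arg);
}

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	/* 1. request any memcpy-capable channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* 2. prepare a descriptor for the copy */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EAGAIN;
		goto out;
	}

	/* 3. attach a callback and submit */
	tx->callback = example_done;
	tx->callback_param = &done;
	cookie = tx->tx_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	/* 4. kick the engine and wait for the callback */
	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(3000))) {
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		ret = -ETIMEDOUT;
	}
out:
	dma_release_channel(chan);
	return ret;
}

+static 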
irqreturn_t arasan_cf_interrupt(int irq, void *dev) +{ + struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data; + unsigned long flags; + u32 irqsts; + + irqsts = readl(acdev->vbase + GIRQ_STS); + if (!(irqsts & GIRQ_CF)) + return IRQ_NONE; + + spin_lock_irqsave(&acdev->host->lock, flags); + irqsts = readl(acdev->vbase + IRQ_STS); + writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */ + writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */ + + /* handle only relevant interrupts */ + irqsts &= ~IGNORED_IRQS; + + if (irqsts & CARD_DETECT_IRQ) { + cf_card_detect(acdev, 1); + spin_unlock_irqrestore(&acdev->host->lock, flags); + return IRQ_HANDLED; + } + + if (irqsts & PIO_XFER_ERR_IRQ) { + acdev->dma_status = ATA_DMA_ERR; + writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START, + acdev->vbase + XFER_CTR); + spin_unlock_irqrestore(&acdev->host->lock, flags); + complete(&acdev->cf_completion); + dev_err(acdev->host->dev, "pio xfer err irq\n"); + return IRQ_HANDLED; + } + + spin_unlock_irqrestore(&acdev->host->lock, flags); + + if (irqsts & BUF_AVAIL_IRQ) { + complete(&acdev->cf_completion); + return IRQ_HANDLED; + } + + if (irqsts & XFER_DONE_IRQ) { + struct ata_queued_cmd *qc = acdev->qc; + + /* Send Complete only for write */ + if (qc->tf.flags & ATA_TFLAG_WRITE) + complete(&acdev->cf_completion); + } + + return IRQ_HANDLED; +} + +static void arasan_cf_freeze(struct ata_port *ap) +{ + struct arasan_cf_dev *acdev = ap->host->private_data; + + /* stop transfer and reset controller */ + writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START, + acdev->vbase + XFER_CTR); + cf_ctrl_reset(acdev); + acdev->dma_status = ATA_DMA_ERR; + + ata_sff_dma_pause(ap); + ata_sff_freeze(ap); +} + +static void arasan_cf_error_handler(struct ata_port *ap) +{ + struct arasan_cf_dev *acdev = ap->host->private_data; + + /* + * DMA transfers using an external DMA controller may be scheduled. + * Abort them before handling error. Refer data_xfer() for further + * details. + */ + cancel_work_sync(&acdev->work); + cancel_delayed_work_sync(&acdev->dwork); + return ata_sff_error_handler(ap); +} + +static void arasan_cf_dma_start(struct arasan_cf_dev *acdev) +{ + struct ata_queued_cmd *qc = acdev->qc; + struct ata_port *ap = qc->ap; + struct ata_taskfile *tf = &qc->tf; + u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK; + u32 write = tf->flags & ATA_TFLAG_WRITE; + + xfer_ctr |= write ? 
XFER_WRITE : XFER_READ; + writel(xfer_ctr, acdev->vbase + XFER_CTR); + + ap->ops->sff_exec_command(ap, tf); + ata_sff_queue_work(&acdev->work); +} + +static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct arasan_cf_dev *acdev = ap->host->private_data; + + /* defer PIO handling to sff_qc_issue */ + if (!ata_is_dma(qc->tf.protocol)) + return ata_sff_qc_issue(qc); + + /* select the device */ + ata_wait_idle(ap); + ata_sff_dev_select(ap, qc->dev->devno); + ata_wait_idle(ap); + + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); + acdev->dma_status = 0; + acdev->qc = qc; + arasan_cf_dma_start(acdev); + ap->hsm_task_state = HSM_ST_LAST; + break; + + default: + WARN_ON(1); + return AC_ERR_SYSTEM; + } + + return 0; +} + +static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct arasan_cf_dev *acdev = ap->host->private_data; + u8 pio = adev->pio_mode - XFER_PIO_0; + unsigned long flags; + u32 val; + + /* Arasan ctrl supports Mode0 -> Mode6 */ + if (pio > 6) { + dev_err(ap->dev, "Unknown PIO mode\n"); + return; + } + + spin_lock_irqsave(&acdev->host->lock, flags); + val = readl(acdev->vbase + OP_MODE) & + ~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK); + writel(val, acdev->vbase + OP_MODE); + val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK; + val |= pio << TRUEIDE_PIO_TIMING_SHIFT; + writel(val, acdev->vbase + TM_CFG); + + cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0); + cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1); + spin_unlock_irqrestore(&acdev->host->lock, flags); +} + +static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + struct arasan_cf_dev *acdev = ap->host->private_data; + u32 opmode, tmcfg, dma_mode = adev->dma_mode; + unsigned long flags; + + spin_lock_irqsave(&acdev->host->lock, flags); + opmode = readl(acdev->vbase + OP_MODE) & + ~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB); + tmcfg = readl(acdev->vbase + TM_CFG); + + if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) { + opmode |= ULTRA_DMA_ENB; + tmcfg &= ~ULTRA_DMA_TIMING_MASK; + tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT; + } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) { + opmode |= MULTI_WORD_DMA_ENB; + tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK; + tmcfg |= (dma_mode - XFER_MW_DMA_0) << + TRUEIDE_MWORD_DMA_TIMING_SHIFT; + } else { + dev_err(ap->dev, "Unknown DMA mode\n"); + spin_unlock_irqrestore(&acdev->host->lock, flags); + return; + } + + writel(opmode, acdev->vbase + OP_MODE); + writel(tmcfg, acdev->vbase + TM_CFG); + writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR); + + cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0); + cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1); + spin_unlock_irqrestore(&acdev->host->lock, flags); +} + +static struct ata_port_operations arasan_cf_ops = { + .inherits = &ata_sff_port_ops, + .freeze = arasan_cf_freeze, + .error_handler = arasan_cf_error_handler, + .qc_issue = arasan_cf_qc_issue, + .set_piomode = arasan_cf_set_piomode, + .set_dmamode = arasan_cf_set_dmamode, +}; + +static int arasan_cf_probe(struct platform_device *pdev) +{ + struct arasan_cf_dev *acdev; + struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev); + struct ata_host *host; + struct ata_port *ap; + struct resource *res; + u32 quirk; + irq_handler_t irq_handler = NULL; + int ret = 0; + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), + DRIVER_NAME)) { + dev_warn(&pdev->dev, "Failed to get memory region resource\n"); + return -ENOENT; + } + + acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL); + if (!acdev) { + dev_warn(&pdev->dev, "kzalloc fail\n"); + return -ENOMEM; + } + + if (pdata) + quirk = pdata->quirk; + else + quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */ + + /* if irq is 0, support only PIO */ + acdev->irq = platform_get_irq(pdev, 0); + if (acdev->irq) + irq_handler = arasan_cf_interrupt; + else + quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; + + acdev->pbase = res->start; + acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!acdev->vbase) { + dev_warn(&pdev->dev, "ioremap fail\n"); + return -ENOMEM; + } + + acdev->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(acdev->clk)) { + dev_warn(&pdev->dev, "Clock not found\n"); + return PTR_ERR(acdev->clk); + } + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) { + ret = -ENOMEM; + dev_warn(&pdev->dev, "alloc host fail\n"); + goto free_clk; + } + + ap = host->ports[0]; + host->private_data = acdev; + acdev->host = host; + ap->ops = &arasan_cf_ops; + ap->pio_mask = ATA_PIO6; + ap->mwdma_mask = ATA_MWDMA4; + ap->udma_mask = ATA_UDMA6; + + init_completion(&acdev->cf_completion); + init_completion(&acdev->dma_completion); + INIT_WORK(&acdev->work, data_xfer); + INIT_DELAYED_WORK(&acdev->dwork, delayed_finish); + dma_cap_set(DMA_MEMCPY, acdev->mask); + + /* Handle platform specific quirks */ + if (quirk) { + if (quirk & CF_BROKEN_PIO) { + ap->ops->set_piomode = NULL; + ap->pio_mask = 0; + } + if (quirk & CF_BROKEN_MWDMA) + ap->mwdma_mask = 0; + if (quirk & CF_BROKEN_UDMA) + ap->udma_mask = 0; + } + ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI; + + ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT; + ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT; + ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR; + ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR; + ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC; + ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN; + ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL; + ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH; + ap->ioaddr.device_addr = acdev->vbase + ATA_SH; + ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD; + ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD; + ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR; + ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR; + + ata_port_desc(ap, "phy_addr %llx virt_addr %p", + (unsigned long long) res->start, acdev->vbase); + + ret = cf_init(acdev); + if (ret) + goto free_clk; + + cf_card_detect(acdev, 0); + + ret = ata_host_activate(host, acdev->irq, irq_handler, 0, + &arasan_cf_sht); + if (!ret) + return 0; + + cf_exit(acdev); +free_clk: + clk_put(acdev->clk); + return ret; +} + +static int arasan_cf_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct arasan_cf_dev *acdev = host->ports[0]->private_data; + + ata_host_detach(host); + cf_exit(acdev); + clk_put(acdev->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int arasan_cf_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct arasan_cf_dev *acdev = host->ports[0]->private_data; + + if (acdev->dma_chan) + acdev->dma_chan->device->device_control(acdev->dma_chan, + DMA_TERMINATE_ALL, 0); + + 
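+	/* quiesce the controller before suspending the libata host */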
cf_exit(acdev); + return ata_host_suspend(host, PMSG_SUSPEND); +} + +static int arasan_cf_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct arasan_cf_dev *acdev = host->ports[0]->private_data; + + cf_init(acdev); + ata_host_resume(host); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume); + +#ifdef CONFIG_OF +static const struct of_device_id arasan_cf_id_table[] = { + { .compatible = "arasan,cf-spear1340" }, + {} +}; +MODULE_DEVICE_TABLE(of, arasan_cf_id_table); +#endif + +static struct platform_driver arasan_cf_driver = { + .probe = arasan_cf_probe, + .remove = arasan_cf_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &arasan_cf_pm_ops, + .of_match_table = of_match_ptr(arasan_cf_id_table), + }, +}; + +module_platform_driver(arasan_cf_driver); + +MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index c4ccb75a4f1..96c05c9494f 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -1,7 +1,8 @@ /* * pata_artop.c - ARTOP ATA controller driver * - * (C) 2006 Red Hat <alan@redhat.com> + * (C) 2006 Red Hat + * (C) 2007,2011 Bartlomiej Zolnierkiewicz * * Based in part on drivers/ide/pci/aec62xx.c * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> @@ -11,7 +12,6 @@ * performance Alessandro Zummo <alessandro.zummo@towertech.it> * * TODO - * 850 serialization once the core supports it * Investigate no_dsc on 850R * Clock detect */ @@ -19,7 +19,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -28,7 +27,7 @@ #include <linux/ata.h> #define DRV_NAME "pata_artop" -#define DRV_VERSION "0.4.2" +#define DRV_VERSION "0.4.6" /* * The ARTOP has 33 Mhz and "over clocked" timing tables. Until we @@ -39,79 +38,47 @@ static int clock = 0; -static int artop6210_pre_reset(struct ata_port *ap) -{ - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - const struct pci_bits artop_enable_bits[] = { - { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ - { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ - }; - - if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) - return -ENOENT; - - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - /** - * artop6210_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe + * artop62x0_pre_reset - probe begin + * @link: link + * @deadline: deadline jiffies for the operation * - * LOCKING: - * None (inherited from caller). - */ - -static void artop6210_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, artop6210_pre_reset, - ata_std_softreset, NULL, - ata_std_postreset); -} - -/** - * artop6260_pre_reset - check for 40/80 pin - * @ap: Port - * - * The ARTOP hardware reports the cable detect bits in register 0x49. * Nothing complicated needed here. 
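 * Only the odd-numbered device IDs (the -R variants) carry per-port
 * enable bits at config offset 0x4A, so the enable test is skipped
 * for the other chips.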
 */
-static int artop6260_pre_reset(struct ata_port *ap)
+static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits artop_enable_bits[] = {
 		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
 		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
 	};
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u8 tmp;
 
-	/* Odd numbered device ids are the units with enable bits (the -R cards) */
-	if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+	/* Odd numbered device ids are the units with enable bits. */
+	if ((pdev->device & 1) &&
+	    !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	pci_read_config_byte(pdev, 0x49, &tmp);
-	if (tmp & (1 >> ap->port_no))
-		ap->cbl = ATA_CBL_PATA40;
-	else
-		ap->cbl = ATA_CBL_PATA80;
-
-	return ata_std_prereset(ap);
+	return ata_sff_prereset(link, deadline);
 }
 
 /**
- * artop6260_error_handler - Probe specified port on PATA host controller
- * @ap: Port to probe
+ * artop6260_cable_detect - identify cable type
+ * @ap: Port
  *
- * LOCKING:
- * None (inherited from caller).
+ * Identify the cable type for the ARTOP interface in question
  */
-static void artop6260_error_handler(struct ata_port *ap)
+static int artop6260_cable_detect(struct ata_port *ap)
 {
-	ata_bmdma_drive_eh(ap, artop6260_pre_reset,
-			   ata_std_softreset, NULL,
-			   ata_std_postreset);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	pci_read_config_byte(pdev, 0x49, &tmp);
+	if (tmp & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
 }
 
 /**
@@ -225,7 +192,7 @@ static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
 /**
  * artop6210_set_dmamode - Initialize host controller PATA DMA timings
  * @ap: Port whose timings we are configuring
- * @adev: um
+ * @adev: Device whose timings we are configuring
  *
  * Set DMA mode for device, in host controller PCI config space.
 *
@@ -299,90 +266,79 @@ static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
 }
 
+/**
+ * artop6210_qc_defer - implement serialization
+ * @qc: command
+ *
+ * Commands are issued one-per-host on this chip: a new command is
+ * deferred while the other port's state machine is busy.
+ */
+
+static int artop6210_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. 
Only allow a command if the + other channel state machine is idle */ + if (alt && alt->qc_active) + return ATA_DEFER_PORT; + return 0; +} + static struct scsi_host_template artop_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations artop6210_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations artop6210_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, .set_dmamode = artop6210_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = artop6210_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .prereset = artop62x0_pre_reset, + .qc_defer = artop6210_qc_defer, }; -static const struct ata_port_operations artop6260_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations artop6260_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, .set_dmamode = artop6260_set_dmamode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = artop6260_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .prereset = artop62x0_pre_reset, }; +static void atp8xx_fixup(struct pci_dev *pdev) +{ + if (pdev->device == 0x0005) + /* BIOS may have left us in UDMA, clear it before libata probe */ + pci_write_config_byte(pdev, 0x54, 0); + else if (pdev->device == 0x0008 || pdev->device == 0x0009) { + u8 reg; + + /* Mac systems come up with some registers not set as we + will need them */ + + /* Clear reset & test bits */ + pci_read_config_byte(pdev, 0x49, ®); + pci_write_config_byte(pdev, 0x49, reg & ~0x30); + + /* PCI latency must be > 0x80 for burst mode, tweak it + * if required. 
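+	 * (the code below bumps the latency timer to 0x90 when it reads
+	 * 0x80 or less, safely above the threshold)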
+ */ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, ®); + if (reg <= 0x80) + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90); + + /* Enable IRQ output and burst mode */ + pci_read_config_byte(pdev, 0x4a, ®); + pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); + } +} /** * artop_init_one - Register ARTOP ATA PCI device with kernel services @@ -400,113 +356,104 @@ static const struct ata_port_operations artop6260_ops = { static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { - static int printed_version; - static struct ata_port_info info_6210 = { - .sht = &artop_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ + static const struct ata_port_info info_6210 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &artop6210_ops, }; - static struct ata_port_info info_626x = { - .sht = &artop_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ + static const struct ata_port_info info_626x = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &artop6260_ops, }; - static struct ata_port_info info_626x_fast = { - .sht = &artop_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ + static const struct ata_port_info info_628x = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &artop6260_ops, }; - struct ata_port_info *port_info[2]; - struct ata_port_info *info; - int ports = 2; + static const struct ata_port_info info_628x_fast = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &artop6260_ops, + }; + const struct ata_port_info *ppi[] = { NULL, NULL }; + int rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - if (id->driver_data == 0) { /* 6210 variant */ - info = &info_6210; - /* BIOS may have left us in UDMA, clear it before libata probe */ - pci_write_config_byte(pdev, 0x54, 0); - /* For the moment (also lacks dsc) */ - printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n"); - printk(KERN_WARNING "Secondary ATA ports will not be activated.\n"); - ports = 1; - } + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (id->driver_data == 0) /* 6210 variant */ + ppi[0] = &info_6210; else if (id->driver_data == 1) /* 6260 */ - info = &info_626x; - else if (id->driver_data == 2) { /* 6260 or 6260 + fast */ + ppi[0] = &info_626x; + else if (id->driver_data == 2) { /* 6280 or 6280 + fast */ unsigned long io = pci_resource_start(pdev, 4); - u8 reg; - info = &info_626x; + ppi[0] = &info_628x; if (inb(io) & 0x10) - info = &info_626x_fast; - /* Mac systems come up with some registers not set as we - will need them */ + ppi[0] = &info_628x_fast; + } - /* Clear reset & test bits */ - pci_read_config_byte(pdev, 0x49, ®); - pci_write_config_byte(pdev, 0x49, reg & ~ 0x30); + BUG_ON(ppi[0] == NULL); - /* PCI latency must be > 0x80 for burst mode, tweak it - * if required. 
- */ - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, ®); - if (reg <= 0x80) - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90); - - /* Enable IRQ output and burst mode */ - pci_read_config_byte(pdev, 0x4a, ®); - pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); + atp8xx_fixup(pdev); - } - port_info[0] = port_info[1] = info; - return ata_pci_init_one(pdev, port_info, ports); + return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { - { 0x1191, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { 0x1191, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, - { 0x1191, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, - { 0x1191, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, - { 0x1191, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, + { PCI_VDEVICE(ARTOP, 0x0005), 0 }, + { PCI_VDEVICE(ARTOP, 0x0006), 1 }, + { PCI_VDEVICE(ARTOP, 0x0007), 1 }, + { PCI_VDEVICE(ARTOP, 0x0008), 2 }, + { PCI_VDEVICE(ARTOP, 0x0009), 2 }, + { } /* terminate list */ }; +#ifdef CONFIG_PM_SLEEP +static int atp8xx_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + atp8xx_fixup(pdev); + + ata_host_resume(host); + return 0; +} +#endif + static struct pci_driver artop_pci_driver = { .name = DRV_NAME, .id_table = artop_pci_tbl, .probe = artop_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = atp8xx_reinit_one, +#endif }; -static int __init artop_init(void) -{ - return pci_register_driver(&artop_pci_driver); -} - -static void __exit artop_exit(void) -{ - pci_unregister_driver(&artop_pci_driver); -} - - -module_init(artop_init); -module_exit(artop_exit); +module_pci_driver(artop_pci_driver); -MODULE_AUTHOR("Alan Cox"); +MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, artop_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c new file mode 100644 index 00000000000..d59d5239405 --- /dev/null +++ b/drivers/ata/pata_at32.c @@ -0,0 +1,401 @@ +/* + * AVR32 SMC/CFC PATA Driver + * + * Copyright (C) 2007 Atmel Norway + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + */ + +#define DEBUG + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/err.h> +#include <linux/io.h> + +#include <mach/board.h> +#include <mach/smc.h> + +#define DRV_NAME "pata_at32" +#define DRV_VERSION "0.0.3" + +/* + * CompactFlash controller memory layout relative to the base address: + * + * Attribute memory: 0000 0000 -> 003f ffff + * Common memory: 0040 0000 -> 007f ffff + * I/O memory: 0080 0000 -> 00bf ffff + * True IDE Mode: 00c0 0000 -> 00df ffff + * Alt IDE Mode: 00e0 0000 -> 00ff ffff + * + * Only True IDE and Alt True IDE mode are needed for this driver. 
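+ * All offsets below are relative to the EBI chip-select window handed
+ * to the probe routine as its platform memory resource.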
+ * + * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc) + * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat) + */ +#define CF_IDE_OFFSET 0x00c00000 +#define CF_ALT_IDE_OFFSET 0x00e00000 +#define CF_RES_SIZE 2048 + +/* + * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA + * adaptor with a logic analyzer or similar. + */ +#undef DEBUG_BUS + +/* + * ATA PIO modes + * + * Name | Mb/s | Min cycle time | Mask + * --------+-------+----------------+-------- + * Mode 0 | 3.3 | 600 ns | 0x01 + * Mode 1 | 5.2 | 383 ns | 0x03 + * Mode 2 | 8.3 | 240 ns | 0x07 + * Mode 3 | 11.1 | 180 ns | 0x0f + * Mode 4 | 16.7 | 120 ns | 0x1f + * + * Alter PIO_MASK below according to table to set maximal PIO mode. + */ +enum { + PIO_MASK = ATA_PIO4, +}; + +/* + * Struct containing private information about device. + */ +struct at32_ide_info { + unsigned int irq; + struct resource res_ide; + struct resource res_alt; + void __iomem *ide_addr; + void __iomem *alt_addr; + unsigned int cs; + struct smc_config smc; +}; + +/* + * Setup SMC for the given ATA timing. + */ +static int pata_at32_setup_timing(struct device *dev, + struct at32_ide_info *info, + const struct ata_timing *ata) +{ + struct smc_config *smc = &info->smc; + struct smc_timing timing; + + int active; + int recover; + + memset(&timing, 0, sizeof(struct smc_timing)); + + /* Total cycle time */ + timing.read_cycle = ata->cyc8b; + + /* DIOR <= CFIOR timings */ + timing.nrd_setup = ata->setup; + timing.nrd_pulse = ata->act8b; + timing.nrd_recover = ata->rec8b; + + /* Convert nanosecond timing to clock cycles */ + smc_set_timing(smc, &timing); + + /* Add one extra cycle setup due to signal ring */ + smc->nrd_setup = smc->nrd_setup + 1; + + active = smc->nrd_setup + smc->nrd_pulse; + recover = smc->read_cycle - active; + + /* Need at least two cycles recovery */ + if (recover < 2) + smc->read_cycle = active + 2; + + /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ + smc->ncs_read_setup = 1; + smc->ncs_read_pulse = smc->read_cycle - 2; + + /* Write timings same as read timings */ + smc->write_cycle = smc->read_cycle; + smc->nwe_setup = smc->nrd_setup; + smc->nwe_pulse = smc->nrd_pulse; + smc->ncs_write_setup = smc->ncs_read_setup; + smc->ncs_write_pulse = smc->ncs_read_pulse; + + /* Do some debugging output of ATA and SMC timings */ + dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n", + ata->cyc8b, ata->setup, ata->act8b, ata->rec8b); + + dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n", + smc->read_cycle, smc->nrd_setup, smc->nrd_pulse, + smc->ncs_read_setup, smc->ncs_read_pulse); + + /* Finally, configure the SMC */ + return smc_set_configuration(info->cs, smc); +} + +/* + * Procedures for libATA. 
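+ * Only set_piomode needs chip-specific code here; command issue and PIO
+ * data transfer are inherited unchanged from ata_sff_port_ops.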
+ */ +static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct ata_timing timing; + struct at32_ide_info *info = ap->host->private_data; + + int ret; + + /* Compute ATA timing */ + ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0); + if (ret) { + dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret); + return; + } + + /* Setup SMC to ATA timing */ + ret = pata_at32_setup_timing(ap->dev, info, &timing); + if (ret) { + dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret); + return; + } +} + +static struct scsi_host_template at32_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations at32_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = pata_at32_set_piomode, +}; + +static int __init pata_at32_init_one(struct device *dev, + struct at32_ide_info *info) +{ + struct ata_host *host; + struct ata_port *ap; + + host = ata_host_alloc(dev, 1); + if (!host) + return -ENOMEM; + + ap = host->ports[0]; + + /* Setup ATA bindings */ + ap->ops = &at32_port_ops; + ap->pio_mask = PIO_MASK; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + /* + * Since all 8-bit taskfile transfers has to go on the lower + * byte of the data bus and there is a bug in the SMC that + * makes it impossible to alter the bus width during runtime, + * we need to hardwire the address signals as follows: + * + * A_IDE(2:0) <= A_EBI(3:1) + * + * This makes all addresses on the EBI even, thus all data + * will be on the lower byte of the data bus. All addresses + * used by libATA need to be altered according to this. + */ + ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1); + ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1); + + ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1); + ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1); + ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1); + ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1); + ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1); + ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1); + ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1); + ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1); + ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1); + ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1); + + /* Set info as private data of ATA host */ + host->private_data = info; + + /* Register ATA device and return */ + return ata_host_activate(host, info->irq, ata_sff_interrupt, + IRQF_SHARED | IRQF_TRIGGER_RISING, + &at32_sht); +} + +/* + * This function may come in handy for people analyzing their own + * EBI -> PATA adaptors. 
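+ * It toggles each register address with alternating 0xff/0x00 patterns
+ * and writes both byte lanes of the data port, so the address and data
+ * wiring can be verified with a logic analyzer.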
+ */
+#ifdef DEBUG_BUS
+
+static void __init pata_at32_debug_bus(struct device *dev,
+				       struct at32_ide_info *info)
+{
+	const int d1 = 0xff;
+	const int d2 = 0x00;
+
+	int i;
+
+	/* Write 8-bit values (registers) */
+	iowrite8(d1, info->alt_addr + (0x06 << 1));
+	iowrite8(d2, info->alt_addr + (0x06 << 1));
+
+	for (i = 0; i < 8; i++) {
+		iowrite8(d1, info->ide_addr + (i << 1));
+		iowrite8(d2, info->ide_addr + (i << 1));
+	}
+
+	/* Write 16 bit values (data) */
+	iowrite16(d1, info->ide_addr);
+	iowrite16(d1 << 8, info->ide_addr);
+
+	iowrite16(d1, info->ide_addr);
+	iowrite16(d1 << 8, info->ide_addr);
+}
+
+#endif
+
+static int __init pata_at32_probe(struct platform_device *pdev)
+{
+	const struct ata_timing initial_timing =
+		{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+	struct device *dev = &pdev->dev;
+	struct at32_ide_info *info;
+	struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
+	struct resource *res;
+
+	int irq;
+	int ret;
+
+	if (!board)
+		return -ENXIO;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	/* Retrieve IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	/* Setup struct containing private information */
+	info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->irq = irq;
+	info->cs = board->cs;
+
+	/* Request memory resources */
+	info->res_ide.start = res->start + CF_IDE_OFFSET;
+	info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
+	info->res_ide.name = "ide";
+	info->res_ide.flags = IORESOURCE_MEM;
+
+	ret = request_resource(res, &info->res_ide);
+	if (ret)
+		goto err_req_res_ide;
+
+	info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
+	info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
+	info->res_alt.name = "alt";
+	info->res_alt.flags = IORESOURCE_MEM;
+
+	ret = request_resource(res, &info->res_alt);
+	if (ret)
+		goto err_req_res_alt;
+
+	/* Setup non-timing elements of SMC */
+	info->smc.bus_width = 2;	/* 16 bit data bus */
+	info->smc.nrd_controlled = 1;	/* Sample data on rising edge of NRD */
+	info->smc.nwe_controlled = 0;	/* Drive data on falling edge of NCS */
+	info->smc.nwait_mode = 3;	/* NWAIT is in READY mode */
+	info->smc.byte_write = 0;	/* Byte select access type */
+	info->smc.tdf_mode = 0;		/* TDF optimization disabled */
+	info->smc.tdf_cycles = 0;	/* No TDF wait cycles */
+
+	/* Setup SMC to ATA timing */
+	ret = pata_at32_setup_timing(dev, info, &initial_timing);
+	if (ret)
+		goto err_setup_timing;
+
+	/* Map ATA address space */
+	ret = -ENOMEM;
+	info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
+	info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
+	if (!info->ide_addr || !info->alt_addr)
+		goto err_ioremap;
+
+#ifdef DEBUG_BUS
+	pata_at32_debug_bus(dev, info);
+#endif
+
+	/* Setup and register ATA device */
+	ret = pata_at32_init_one(dev, info);
+	if (ret)
+		goto err_ata_device;
+
+	return 0;
+
+ err_ata_device:
+ err_ioremap:
+ err_setup_timing:
+	release_resource(&info->res_alt);
+ err_req_res_alt:
+	release_resource(&info->res_ide);
+ err_req_res_ide:
+	kfree(info);
+
+	return ret;
+}
+
+static int __exit pata_at32_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct at32_ide_info *info;
+
+	if (!host)
+		return 0;
+
+	info = host->private_data;
+	ata_host_detach(host);
+
+	if (!info)
+		return 0;
+
+	release_resource(&info->res_ide);
+	release_resource(&info->res_alt);
+
+	kfree(info);
+
+	return 0;
+}
+
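A board binds to this driver through a platform device named "at32_ide" carrying one memory resource (the EBI chip-select window) and an IRQ. A hypothetical hookup might look like the sketch below; the base address, IRQ number, and chip-select value are placeholders, and only the .cs member of struct ide_platform_data is consumed by the probe above:

static struct ide_platform_data at32_ide_pdata = {
	.cs	= 4,			/* EBI chip select, board specific */
};

static struct resource at32_ide_resource[] = {
	{
		/* EBI chip-select window; the driver carves the True IDE
		 * and Alt IDE sub-ranges out of it itself */
		.start	= 0x04000000,
		.end	= 0x04ffffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 24,		/* board-specific interrupt line */
		.end	= 24,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at32_ide_device = {
	.name		= "at32_ide",
	.id		= -1,
	.resource	= at32_ide_resource,
	.num_resources	= ARRAY_SIZE(at32_ide_resource),
	.dev		= {
		.platform_data = &at32_ide_pdata,
	},
};

/* registered from board code with platform_device_register(&at32_ide_device); */

+/* work with hotplug 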
and coldplug */ +MODULE_ALIAS("platform:at32_ide"); + +static struct platform_driver pata_at32_driver = { + .remove = __exit_p(pata_at32_remove), + .driver = { + .name = "at32_ide", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver_probe(pata_at32_driver, pata_at32_probe); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver"); +MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c new file mode 100644 index 00000000000..8a66f23af4c --- /dev/null +++ b/drivers/ata/pata_at91.c @@ -0,0 +1,457 @@ +/* + * PATA driver for AT91SAM9260 Static Memory Controller + * with CompactFlash interface in True IDE mode + * + * Copyright (C) 2009 Matyukevich Sergey + * 2011 Igor Plyatov + * + * Based on: + * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c + * * pata_at32 driver by Kristoffer Nyborg Gregertsen + * * at91_ide driver by Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/gfp.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/clk.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> +#include <linux/platform_data/atmel.h> + +#include <mach/at91sam9_smc.h> +#include <asm/gpio.h> + +#define DRV_NAME "pata_at91" +#define DRV_VERSION "0.3" + +#define CF_IDE_OFFSET 0x00c00000 +#define CF_ALT_IDE_OFFSET 0x00e00000 +#define CF_IDE_RES_SIZE 0x08 +#define CS_PULSE_MAXIMUM 319 +#define ER_SMC_CALC 1 +#define ER_SMC_RECALC 2 + +struct at91_ide_info { + unsigned long mode; + unsigned int cs; + struct clk *mck; + void __iomem *ide_addr; + void __iomem *alt_addr; +}; + +/** + * struct smc_range - range of valid values for SMC register. + */ +struct smc_range { + int min; + int max; +}; + +/** + * adjust_smc_value - adjust value for one of SMC registers. + * @value: adjusted value + * @range: array of SMC ranges with valid values + * @size: SMC ranges array size + * + * This returns the difference between input and output value or negative + * in case of invalid input value. + * If negative returned, then output value = maximal possible from ranges. + */ +static int adjust_smc_value(int *value, struct smc_range *range, int size) +{ + int maximum = (range + size - 1)->max; + int remainder; + + do { + if (*value < range->min) { + remainder = range->min - *value; + *value = range->min; /* nearest valid value */ + return remainder; + } else if ((range->min <= *value) && (*value <= range->max)) + return 0; + + range++; + } while (--size); + *value = maximum; + + return -1; /* invalid value */ +} + +/** + * calc_smc_vals - calculate SMC register values + * @dev: ATA device + * @setup: SMC_SETUP register value + * @pulse: SMC_PULSE register value + * @cycle: SMC_CYCLE register value + * + * This returns negative in case of invalid values for SMC registers: + * -ER_SMC_RECALC - recalculation required for SMC values, + * -ER_SMC_CALC - calculation failed (invalid input values). + * + * SMC use special coding scheme, see "Coding and Range of Timing + * Parameters" table from AT91SAM9 datasheets. 
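+ * For example, a pulse value of 300 does not fit in the 6-bit field
+ * directly: it falls in the second valid range (256..319) and is encoded
+ * per the formula below as 256*pulse[6] + pulse[5:0], i.e. pulse[6] = 1
+ * and pulse[5:0] = 44.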
+ *
+ * SMC_SETUP = 128*setup[5] + setup[4:0]
+ * SMC_PULSE = 256*pulse[6] + pulse[5:0]
+ * SMC_CYCLE = 256*cycle[8:7] + cycle[6:0]
+ */
+static int calc_smc_vals(struct device *dev,
+		int *setup, int *pulse, int *cycle, int *cs_pulse)
+{
+	int ret_val;
+	int err = 0;
+	struct smc_range range_setup[] = {	/* SMC_SETUP valid values */
+		{.min = 0,	.max = 31},	/* first range */
+		{.min = 128,	.max = 159}	/* second range */
+	};
+	struct smc_range range_pulse[] = {	/* SMC_PULSE valid values */
+		{.min = 0,	.max = 63},	/* first range */
+		{.min = 256,	.max = 319}	/* second range */
+	};
+	struct smc_range range_cycle[] = {	/* SMC_CYCLE valid values */
+		{.min = 0,	.max = 127},	/* first range */
+		{.min = 256,	.max = 383},	/* second range */
+		{.min = 512,	.max = 639},	/* third range */
+		{.min = 768,	.max = 895}	/* fourth range */
+	};
+
+	ret_val = adjust_smc_value(setup, range_setup, ARRAY_SIZE(range_setup));
+	if (ret_val < 0)
+		dev_warn(dev, "maximal SMC Setup value\n");
+	else
+		*cycle += ret_val;
+
+	ret_val = adjust_smc_value(pulse, range_pulse, ARRAY_SIZE(range_pulse));
+	if (ret_val < 0)
+		dev_warn(dev, "maximal SMC Pulse value\n");
+	else
+		*cycle += ret_val;
+
+	ret_val = adjust_smc_value(cycle, range_cycle, ARRAY_SIZE(range_cycle));
+	if (ret_val < 0)
+		dev_warn(dev, "maximal SMC Cycle value\n");
+
+	*cs_pulse = *cycle;
+	if (*cs_pulse > CS_PULSE_MAXIMUM) {
+		dev_err(dev, "unable to calculate valid SMC settings\n");
+		return -ER_SMC_CALC;
+	}
+
+	ret_val = adjust_smc_value(cs_pulse, range_pulse,
+					ARRAY_SIZE(range_pulse));
+	if (ret_val < 0) {
+		dev_warn(dev, "maximal SMC CS Pulse value\n");
+	} else if (ret_val != 0) {
+		*cycle = *cs_pulse;
+		dev_warn(dev, "SMC Cycle extended\n");
+		err = -ER_SMC_RECALC;
+	}
+
+	return err;
+}
+
+/**
+ * to_smc_format - convert values into SMC format
+ * @setup: SETUP value of SMC Setup Register
+ * @pulse: PULSE value of SMC Pulse Register
+ * @cycle: CYCLE value of SMC Cycle Register
+ * @cs_pulse: NCS_PULSE value of SMC Pulse Register
+ */
+static void to_smc_format(int *setup, int *pulse, int *cycle, int *cs_pulse)
+{
+	*setup = (*setup & 0x1f) | ((*setup & 0x80) >> 2);
+	*pulse = (*pulse & 0x3f) | ((*pulse & 0x100) >> 2);
+	*cycle = (*cycle & 0x7f) | ((*cycle & 0x300) >> 1);
+	*cs_pulse = (*cs_pulse & 0x3f) | ((*cs_pulse & 0x100) >> 2);
+}
+
+static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
+{
+	unsigned long mul;
+
+	/*
+	 * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
+	 *     x * (f / 1_000_000_000) =
+	 *     x * ((f * 65536) / 1_000_000_000) / 65536 =
+	 *     x * (((f / 10_000) * 65536) / 100_000) / 65536 =
+	 */
+
+	mul = (mck_hz / 10000) << 16;
+	mul /= 100000;
+
+	return (ns * mul + 65536) >> 16;	/* rounding */
+}
+
+/**
+ * set_smc_timing - SMC timings setup.
+ * @dev: device
+ * @adev: ATA device
+ * @info: AT91 IDE info
+ * @ata: ATA timings
+ *
+ * It's assumed that write timings are the same as read timings,
+ * cs_setup = 0 and cs_pulse = cycle.
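+ * Only the read-side values are therefore computed from the ATA timings
+ * and then mirrored into the corresponding write fields below.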
+ */ +static void set_smc_timing(struct device *dev, struct ata_device *adev, + struct at91_ide_info *info, const struct ata_timing *ata) +{ + int ret = 0; + int use_iordy; + struct sam9_smc_config smc; + unsigned int t6z; /* data tristate time in ns */ + unsigned int cycle; /* SMC Cycle width in MCK ticks */ + unsigned int setup; /* SMC Setup width in MCK ticks */ + unsigned int pulse; /* CFIOR and CFIOW pulse width in MCK ticks */ + unsigned int cs_pulse; /* CS4 or CS5 pulse width in MCK ticks*/ + unsigned int tdf_cycles; /* SMC TDF MCK ticks */ + unsigned long mck_hz; /* MCK frequency in Hz */ + + t6z = (ata->mode < XFER_PIO_5) ? 30 : 20; + mck_hz = clk_get_rate(info->mck); + cycle = calc_mck_cycles(ata->cyc8b, mck_hz); + setup = calc_mck_cycles(ata->setup, mck_hz); + pulse = calc_mck_cycles(ata->act8b, mck_hz); + tdf_cycles = calc_mck_cycles(t6z, mck_hz); + + do { + ret = calc_smc_vals(dev, &setup, &pulse, &cycle, &cs_pulse); + } while (ret == -ER_SMC_RECALC); + + if (ret == -ER_SMC_CALC) + dev_err(dev, "Interface may not operate correctly\n"); + + dev_dbg(dev, "SMC Setup=%u, Pulse=%u, Cycle=%u, CS Pulse=%u\n", + setup, pulse, cycle, cs_pulse); + to_smc_format(&setup, &pulse, &cycle, &cs_pulse); + /* disable or enable waiting for IORDY signal */ + use_iordy = ata_pio_need_iordy(adev); + if (use_iordy) + info->mode |= AT91_SMC_EXNWMODE_READY; + + if (tdf_cycles > 15) { + tdf_cycles = 15; + dev_warn(dev, "maximal SMC TDF Cycles value\n"); + } + + dev_dbg(dev, "Use IORDY=%u, TDF Cycles=%u\n", use_iordy, tdf_cycles); + + /* SMC Setup Register */ + smc.nwe_setup = smc.nrd_setup = setup; + smc.ncs_write_setup = smc.ncs_read_setup = 0; + /* SMC Pulse Register */ + smc.nwe_pulse = smc.nrd_pulse = pulse; + smc.ncs_write_pulse = smc.ncs_read_pulse = cs_pulse; + /* SMC Cycle Register */ + smc.write_cycle = smc.read_cycle = cycle; + /* SMC Mode Register*/ + smc.tdf_cycles = tdf_cycles; + smc.mode = info->mode; + + sam9_smc_configure(0, info->cs, &smc); +} + +static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct at91_ide_info *info = ap->host->private_data; + struct ata_timing timing; + int ret; + + /* Compute ATA timing and set it to SMC */ + ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0); + if (ret) { + dev_warn(ap->dev, "Failed to compute ATA timing %d, " + "set PIO_0 timing\n", ret); + timing = *ata_timing_find_mode(XFER_PIO_0); + } + set_smc_timing(ap->dev, adev, info, &timing); +} + +static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw) +{ + struct at91_ide_info *info = dev->link->ap->host->private_data; + unsigned int consumed; + unsigned long flags; + struct sam9_smc_config smc; + + local_irq_save(flags); + sam9_smc_read_mode(0, info->cs, &smc); + + /* set 16bit mode before writing data */ + smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16; + sam9_smc_write_mode(0, info->cs, &smc); + + consumed = ata_sff_data_xfer(dev, buf, buflen, rw); + + /* restore 8bit mode after data is written */ + smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8; + sam9_smc_write_mode(0, info->cs, &smc); + + local_irq_restore(flags); + return consumed; +} + +static struct scsi_host_template pata_at91_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations pata_at91_port_ops = { + .inherits = &ata_sff_port_ops, + + .sff_data_xfer = pata_at91_data_xfer_noirq, + .set_piomode = pata_at91_set_piomode, + .cable_detect = ata_cable_40wire, +}; + +static int 
pata_at91_probe(struct platform_device *pdev) +{ + struct at91_cf_data *board = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct at91_ide_info *info; + struct resource *mem_res; + struct ata_host *host; + struct ata_port *ap; + + int irq_flags = 0; + int irq = 0; + int ret; + + /* get platform resources: IO/CTL memories and irq/rst pins */ + + if (pdev->num_resources != 1) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!mem_res) { + dev_err(dev, "failed to get mem resource\n"); + return -EINVAL; + } + + irq = board->irq_pin; + + /* init ata host */ + + host = ata_host_alloc(dev, 1); + + if (!host) + return -ENOMEM; + + ap = host->ports[0]; + ap->ops = &pata_at91_port_ops; + ap->flags |= ATA_FLAG_SLAVE_POSS; + ap->pio_mask = ATA_PIO4; + + if (!gpio_is_valid(irq)) { + ap->flags |= ATA_FLAG_PIO_POLLING; + ata_port_desc(ap, "no IRQ, using PIO polling"); + } + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + + if (!info) { + dev_err(dev, "failed to allocate memory for private data\n"); + return -ENOMEM; + } + + info->mck = clk_get(NULL, "mck"); + + if (IS_ERR(info->mck)) { + dev_err(dev, "failed to get access to mck clock\n"); + return -ENODEV; + } + + info->cs = board->chipselect; + info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | + AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | + AT91_SMC_DBW_8 | AT91_SMC_TDF_(0); + + info->ide_addr = devm_ioremap(dev, + mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE); + + if (!info->ide_addr) { + dev_err(dev, "failed to map IO base\n"); + ret = -ENOMEM; + goto err_put; + } + + info->alt_addr = devm_ioremap(dev, + mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE); + + if (!info->alt_addr) { + dev_err(dev, "failed to map CTL base\n"); + ret = -ENOMEM; + goto err_put; + } + + ap->ioaddr.cmd_addr = info->ide_addr; + ap->ioaddr.ctl_addr = info->alt_addr + 0x06; + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + + ata_sff_std_ports(&ap->ioaddr); + + ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx", + (unsigned long long)mem_res->start + CF_IDE_OFFSET, + (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET); + + host->private_data = info; + + ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0, + gpio_is_valid(irq) ? 
ata_sff_interrupt : NULL, + irq_flags, &pata_at91_sht); + if (ret) + goto err_put; + + return 0; + +err_put: + clk_put(info->mck); + return ret; +} + +static int pata_at91_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct at91_ide_info *info; + + if (!host) + return 0; + info = host->private_data; + + ata_host_detach(host); + + if (!info) + return 0; + + clk_put(info->mck); + + return 0; +} + +static struct platform_driver pata_at91_driver = { + .probe = pata_at91_probe, + .remove = pata_at91_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(pata_at91_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC"); +MODULE_AUTHOR("Matyukevich Sergey"); +MODULE_VERSION(DRV_VERSION); + diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 6c2269b6bd3..970f7767e5f 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -1,7 +1,7 @@ /* * pata_atiixp.c - ATI PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * (C) 2009-2010 Bartlomiej Zolnierkiewicz * * Based on * @@ -15,14 +15,14 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include <linux/dmi.h> #define DRV_NAME "pata_atiixp" -#define DRV_VERSION "0.4.3" +#define DRV_VERSION "0.4.6" enum { ATIIXP_IDE_PIO_TIMING = 0x40, @@ -33,24 +33,59 @@ enum { ATIIXP_IDE_UDMA_MODE = 0x56 }; -static int atiixp_pre_reset(struct ata_port *ap) +static const struct dmi_system_id attixp_cable_override_dmi_table[] = { + { + /* Board has onboard PATA<->SATA converters */ + .ident = "MSI E350DM-E33", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), + DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"), + }, + }, + { } +}; + +static int atiixp_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); - static struct pci_bits atiixp_enable_bits[] = { + u8 udma; + + if (dmi_check_system(attixp_cable_override_dmi_table)) + return ATA_CBL_PATA40_SHORT; + + /* Hack from drivers/ide/pci. Really we want to know how to do the + raw detection not play follow the bios mode guess */ + pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); + if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40) + return ATA_CBL_PATA80; + return ATA_CBL_PATA40; +} + +static DEFINE_SPINLOCK(atiixp_lock); + +/** + * atiixp_prereset - perform reset handling + * @link: ATA link + * @deadline: deadline jiffies for the operation + * + * Reset sequence checking enable bits to see which ports are + * active. 
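+ * Channels whose enable bit is clear are reported as absent (-ENOENT) + * so that libata skips probing them.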
+ */ + +static int atiixp_prereset(struct ata_link *link, unsigned long deadline) +{ + static const struct pci_bits atiixp_enable_bits[] = { { 0x48, 1, 0x01, 0x00 }, { 0x48, 1, 0x08, 0x00 } }; + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); -} - -static void atiixp_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** @@ -69,20 +104,19 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, struct pci_dev *pdev = to_pci_dev(ap->host->dev); int dn = 2 * ap->port_no + adev->devno; - - /* Check this is correct - the order is odd in both drivers */ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); - u16 pio_mode_data, pio_timing_data; + u32 pio_timing_data; + u16 pio_mode_data; pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); pio_mode_data &= ~(0x7 << (4 * dn)); pio_mode_data |= pio << (4 * dn); pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); - pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); - pio_mode_data &= ~(0xFF << timing_shift); - pio_mode_data |= (pio_timings[pio] << timing_shift); - pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); + pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); + pio_timing_data &= ~(0xFF << timing_shift); + pio_timing_data |= (pio_timings[pio] << timing_shift); + pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); } /** @@ -96,7 +130,10 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev) { + unsigned long flags; + spin_lock_irqsave(&atiixp_lock, flags); atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0); + spin_unlock_irqrestore(&atiixp_lock, flags); } /** @@ -116,6 +153,9 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) int dma = adev->dma_mode; int dn = 2 * ap->port_no + adev->devno; int wanted_pio; + unsigned long flags; + + spin_lock_irqsave(&atiixp_lock, flags); if (adev->dma_mode >= XFER_UDMA_0) { u16 udma_mode_data; @@ -127,16 +167,17 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) udma_mode_data |= dma << (4 * dn); pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); } else { - u16 mwdma_timing_data; - /* Check this is correct - the order is odd in both drivers */ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); + u32 mwdma_timing_data; dma -= XFER_MW_DMA_0; - pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data); + pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, + &mwdma_timing_data); mwdma_timing_data &= ~(0xFF << timing_shift); mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); - pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data); + pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, + mwdma_timing_data); } /* * We must now look at the PIO mode situation. 
We may need to @@ -152,6 +193,7 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) if (adev->pio_mode != wanted_pio) atiixp_set_pio_timing(ap, adev, wanted_pio); + spin_unlock_irqrestore(&atiixp_lock, flags); } /** @@ -160,6 +202,9 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) * * When DMA begins we need to ensure that the UDMA control * register for the channel is correctly set. + * + * Note: The host lock held by the libata layer protects + * us from two channels both trying to set DMA bits at once */ static void atiixp_bmdma_start(struct ata_queued_cmd *qc) @@ -172,7 +217,7 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc) u16 tmp16; pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); - if (adev->dma_mode >= XFER_UDMA_0) + if (ata_using_udma(adev)) tmp16 |= (1 << dn); else tmp16 &= ~(1 << dn); @@ -186,6 +231,9 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc) * * DMA has completed. Clear the UDMA flag as the next operations will * be PIO ones not UDMA data transfer. + * + * Note: The host lock held by the libata layer protects + * us from two channels both trying to set DMA bits at once */ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) @@ -202,103 +250,64 @@ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) } static struct scsi_host_template atiixp_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), + .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations atiixp_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = atiixp_set_piomode, - .set_dmamode = atiixp_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = atiixp_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, + .inherits = &ata_bmdma_port_ops, + + .qc_prep = ata_bmdma_dumb_qc_prep, .bmdma_start = atiixp_bmdma_start, .bmdma_stop = atiixp_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .prereset = atiixp_prereset, + .cable_detect = atiixp_cable_detect, + .set_piomode = atiixp_set_piomode, + .set_dmamode = atiixp_set_dmamode, }; -static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &atiixp_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x06, /* No MWDMA0 support */ - .udma_mask = 0x3F, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + 
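/* No MWDMA0 support */ + 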
.mwdma_mask = ATA_MWDMA12_ONLY, + .udma_mask = ATA_UDMA5, .port_ops = &atiixp_port_ops }; - static struct ata_port_info *port_info[2] = { &info, &info }; - return ata_pci_init_one(dev, port_info, 2); + const struct ata_port_info *ppi[] = { &info, &info }; + + return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } -static struct pci_device_id atiixp[] = { - { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), }, - { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), }, - { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, - { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, - { 0, }, +static const struct pci_device_id atiixp[] = { + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), }, + + { }, }; static struct pci_driver atiixp_pci_driver = { .name = DRV_NAME, .id_table = atiixp, .probe = atiixp_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .resume = ata_pci_device_resume, + .suspend = ata_pci_device_suspend, +#endif }; -static int __init atiixp_init(void) -{ - return pci_register_driver(&atiixp_pci_driver); -} - - -static void __exit atiixp_exit(void) -{ - pci_unregister_driver(&atiixp_pci_driver); -} - +module_pci_driver(atiixp_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, atiixp); MODULE_VERSION(DRV_VERSION); - -module_init(atiixp_init); -module_exit(atiixp_exit); diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c new file mode 100644 index 00000000000..a705cfca90f --- /dev/null +++ b/drivers/ata/pata_atp867x.c @@ -0,0 +1,573 @@ +/* + * pata_atp867x.c - ARTOP 867X 64bit 4-channel UDMA133 ATA controller driver + * + * (C) 2009 Google Inc. John(Jung-Ik) Lee <jilee@google.com> + * + * Per Atp867 data sheet rev 1.2, Acard. + * Based in part on early ide code from + * 2003-2004 by Eric Uhrhane, Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * TODO: + * 1. RAID features [comparison, XOR, striping, mirroring, etc.] 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> + +#define DRV_NAME "pata_atp867x" +#define DRV_VERSION "0.7.5" + +/* + * IO Registers + * Note that all runtime hot priv ports are cached in ap private_data + */ + +enum { + ATP867X_IO_CHANNEL_OFFSET = 0x10, + + /* + * IO Register Bitfields + */ + + ATP867X_IO_PIOSPD_ACTIVE_SHIFT = 4, + ATP867X_IO_PIOSPD_RECOVER_SHIFT = 0, + + ATP867X_IO_DMAMODE_MSTR_SHIFT = 0, + ATP867X_IO_DMAMODE_MSTR_MASK = 0x07, + ATP867X_IO_DMAMODE_SLAVE_SHIFT = 4, + ATP867X_IO_DMAMODE_SLAVE_MASK = 0x70, + + ATP867X_IO_DMAMODE_UDMA_6 = 0x07, + ATP867X_IO_DMAMODE_UDMA_5 = 0x06, + ATP867X_IO_DMAMODE_UDMA_4 = 0x05, + ATP867X_IO_DMAMODE_UDMA_3 = 0x04, + ATP867X_IO_DMAMODE_UDMA_2 = 0x03, + ATP867X_IO_DMAMODE_UDMA_1 = 0x02, + ATP867X_IO_DMAMODE_UDMA_0 = 0x01, + ATP867X_IO_DMAMODE_DISABLE = 0x00, + + ATP867X_IO_SYS_INFO_66MHZ = 0x04, + ATP867X_IO_SYS_INFO_SLOW_UDMA5 = 0x02, + ATP867X_IO_SYS_MASK_RESERVED = (~0xf1), + + ATP867X_IO_PORTSPD_VAL = 0x1143, + ATP867X_PREREAD_VAL = 0x0200, + + ATP867X_NUM_PORTS = 4, + ATP867X_BAR_IOBASE = 0, + ATP867X_BAR_ROMBASE = 6, +}; + +#define ATP867X_IOBASE(ap) ((ap)->host->iomap[0]) +#define ATP867X_SYS_INFO(ap) (0x3F + ATP867X_IOBASE(ap)) + +#define ATP867X_IO_PORTBASE(ap, port) (0x00 + ATP867X_IOBASE(ap) + \ + (port) * ATP867X_IO_CHANNEL_OFFSET) +#define ATP867X_IO_DMABASE(ap, port) (0x40 + \ + ATP867X_IO_PORTBASE((ap), (port))) + +#define ATP867X_IO_STATUS(ap, port) (0x07 + \ + ATP867X_IO_PORTBASE((ap), (port))) +#define ATP867X_IO_ALTSTATUS(ap, port) (0x0E + \ + ATP867X_IO_PORTBASE((ap), (port))) + +/* + * hot priv ports + */ +#define ATP867X_IO_MSTRPIOSPD(ap, port) (0x08 + \ + ATP867X_IO_DMABASE((ap), (port))) +#define ATP867X_IO_SLAVPIOSPD(ap, port) (0x09 + \ + ATP867X_IO_DMABASE((ap), (port))) +#define ATP867X_IO_8BPIOSPD(ap, port) (0x0A + \ + ATP867X_IO_DMABASE((ap), (port))) +#define ATP867X_IO_DMAMODE(ap, port) (0x0B + \ + ATP867X_IO_DMABASE((ap), (port))) + +#define ATP867X_IO_PORTSPD(ap, port) (0x4A + \ + ATP867X_IO_PORTBASE((ap), (port))) +#define ATP867X_IO_PREREAD(ap, port) (0x4C + \ + ATP867X_IO_PORTBASE((ap), (port))) + +struct atp867x_priv { + void __iomem *dma_mode; + void __iomem *mstr_piospd; + void __iomem *slave_piospd; + void __iomem *eightb_piospd; + int pci66mhz; +}; + +static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct atp867x_priv *dp = ap->private_data; + u8 speed = adev->dma_mode; + u8 b; + u8 mode = speed - XFER_UDMA_0 + 1; + + /* + * Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed + * on 66MHz bus + * rev-A: UDMA_1~4 (5, 6 no change) + * rev-B: all UDMA modes + * UDMA_0 stays not to disable UDMA + */ + if (dp->pci66mhz && mode > ATP867X_IO_DMAMODE_UDMA_0 && + (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B || + mode < ATP867X_IO_DMAMODE_UDMA_5)) + mode--; + + b = ioread8(dp->dma_mode); + if (adev->devno & 1) { + b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK) | + (mode << ATP867X_IO_DMAMODE_SLAVE_SHIFT); + } else { + b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK) | + (mode << ATP867X_IO_DMAMODE_MSTR_SHIFT); + } + iowrite8(b, dp->dma_mode); +} + +static int atp867x_get_active_clocks_shifted(struct ata_port *ap, + unsigned int clk) +{ + struct atp867x_priv *dp = ap->private_data; + unsigned char clocks = clk; + + /* + * Doc 
6.6.9: increase the clock value by 1 for safer PIO speed + * on 66MHz bus + */ + if (dp->pci66mhz) + clocks++; + + switch (clocks) { + case 0: + clocks = 1; + break; + case 1 ... 6: + break; + default: + printk(KERN_WARNING "ATP867X: active %dclk is invalid. " + "Using 12clk.\n", clk); + case 9 ... 12: + clocks = 7; /* 12 clk */ + break; + case 7: + case 8: /* default 8 clk */ + clocks = 0; + goto active_clock_shift_done; + } + +active_clock_shift_done: + return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT; +} + +static int atp867x_get_recover_clocks_shifted(unsigned int clk) +{ + unsigned char clocks = clk; + + switch (clocks) { + case 0: + clocks = 1; + break; + case 1 ... 11: + break; + case 13: + case 14: + --clocks; /* by the spec */ + break; + case 15: + break; + default: + printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " + "Using default 12clk.\n", clk); + case 12: /* default 12 clk */ + clocks = 0; + break; + } + + return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT; +} + +static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct ata_device *peer = ata_dev_pair(adev); + struct atp867x_priv *dp = ap->private_data; + u8 speed = adev->pio_mode; + struct ata_timing t, p; + int T, UT; + u8 b; + + T = 1000000000 / 33333; + UT = T / 4; + + ata_timing_compute(adev, speed, &t, T, UT); + if (peer && peer->pio_mode) { + ata_timing_compute(peer, peer->pio_mode, &p, T, UT); + ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT); + } + + b = ioread8(dp->dma_mode); + if (adev->devno & 1) + b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK); + else + b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK); + iowrite8(b, dp->dma_mode); + + b = atp867x_get_active_clocks_shifted(ap, t.active) | + atp867x_get_recover_clocks_shifted(t.recover); + + if (adev->devno & 1) + iowrite8(b, dp->slave_piospd); + else + iowrite8(b, dp->mstr_piospd); + + b = atp867x_get_active_clocks_shifted(ap, t.act8b) | + atp867x_get_recover_clocks_shifted(t.rec8b); + + iowrite8(b, dp->eightb_piospd); +} + +static int atp867x_cable_override(struct pci_dev *pdev) +{ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_ARTOP && + (pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867A || + pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867B)) { + return 1; + } + return 0; +} + +static int atp867x_cable_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + + if (atp867x_cable_override(pdev)) + return ATA_CBL_PATA40_SHORT; + + return ATA_CBL_PATA_UNK; +} + +static struct scsi_host_template atp867x_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations atp867x_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = atp867x_cable_detect, + .set_piomode = atp867x_set_piomode, + .set_dmamode = atp867x_set_dmamode, +}; + + +#ifdef ATP867X_DEBUG +static void atp867x_check_res(struct pci_dev *pdev) +{ + int i; + unsigned long start, len; + + /* Check the PCI resources for this channel are enabled */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + start = pci_resource_start(pdev, i); + len = pci_resource_len(pdev, i); + printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n", + start, len); + } +} + +static void atp867x_check_ports(struct ata_port *ap, int port) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + struct atp867x_priv *dp = ap->private_data; + + printk(KERN_DEBUG "ATP867X: port[%d] addresses\n" + " cmd_addr =0x%llx, 0x%llx\n" + " ctl_addr =0x%llx, 0x%llx\n" + " bmdma_addr =0x%llx, 0x%llx\n" + " data_addr =0x%llx\n" + " error_addr =0x%llx\n" + " feature_addr =0x%llx\n" + " 
nsect_addr =0x%llx\n" + " lbal_addr =0x%llx\n" + " lbam_addr =0x%llx\n" + " lbah_addr =0x%llx\n" + " device_addr =0x%llx\n" + " status_addr =0x%llx\n" + " command_addr =0x%llx\n" + " dp->dma_mode =0x%llx\n" + " dp->mstr_piospd =0x%llx\n" + " dp->slave_piospd =0x%llx\n" + " dp->eightb_piospd =0x%llx\n" + " dp->pci66mhz =0x%lx\n", + port, + (unsigned long long)ioaddr->cmd_addr, + (unsigned long long)ATP867X_IO_PORTBASE(ap, port), + (unsigned long long)ioaddr->ctl_addr, + (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port), + (unsigned long long)ioaddr->bmdma_addr, + (unsigned long long)ATP867X_IO_DMABASE(ap, port), + (unsigned long long)ioaddr->data_addr, + (unsigned long long)ioaddr->error_addr, + (unsigned long long)ioaddr->feature_addr, + (unsigned long long)ioaddr->nsect_addr, + (unsigned long long)ioaddr->lbal_addr, + (unsigned long long)ioaddr->lbam_addr, + (unsigned long long)ioaddr->lbah_addr, + (unsigned long long)ioaddr->device_addr, + (unsigned long long)ioaddr->status_addr, + (unsigned long long)ioaddr->command_addr, + (unsigned long long)dp->dma_mode, + (unsigned long long)dp->mstr_piospd, + (unsigned long long)dp->slave_piospd, + (unsigned long long)dp->eightb_piospd, + (unsigned long)dp->pci66mhz); +} +#endif + +static int atp867x_set_priv(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct atp867x_priv *dp; + int port = ap->port_no; + + dp = ap->private_data = + devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (dp == NULL) + return -ENOMEM; + + dp->dma_mode = ATP867X_IO_DMAMODE(ap, port); + dp->mstr_piospd = ATP867X_IO_MSTRPIOSPD(ap, port); + dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port); + dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port); + + dp->pci66mhz = + ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ; + + return 0; +} + +static void atp867x_fixup(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct ata_port *ap = host->ports[0]; + int i; + u8 v; + + /* + * Broken BIOS might not set latency high enough + */ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &v); + if (v < 0x80) { + v = 0x80; + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v); + printk(KERN_DEBUG "ATP867X: set latency timer of device %s" + " to %d\n", pci_name(pdev), v); + } + + /* + * init 8bit io ports speed(0aaarrrr) to 43h and + * init udma modes of master/slave to 0/0(11h) + */ + for (i = 0; i < ATP867X_NUM_PORTS; i++) + iowrite16(ATP867X_IO_PORTSPD_VAL, ATP867X_IO_PORTSPD(ap, i)); + + /* + * init PreREAD counts + */ + for (i = 0; i < ATP867X_NUM_PORTS; i++) + iowrite16(ATP867X_PREREAD_VAL, ATP867X_IO_PREREAD(ap, i)); + + v = ioread8(ATP867X_IOBASE(ap) + 0x28); + v &= 0xcf; /* Enable INTA#: bit4=0 means enable */ + v |= 0xc0; /* Enable PCI burst, MRM & not immediate interrupts */ + iowrite8(v, ATP867X_IOBASE(ap) + 0x28); + + /* + * Turn off the over clocked udma5 mode, only for Rev-B + */ + v = ioread8(ATP867X_SYS_INFO(ap)); + v &= ATP867X_IO_SYS_MASK_RESERVED; + if (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B) + v |= ATP867X_IO_SYS_INFO_SLOW_UDMA5; + iowrite8(v, ATP867X_SYS_INFO(ap)); +} + +static int atp867x_ata_pci_sff_init_host(struct ata_host *host) +{ + struct device *gdev = host->dev; + struct pci_dev *pdev = to_pci_dev(gdev); + unsigned int mask = 0; + int i, rc; + + /* + * do not map rombase + */ + rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + +#ifdef ATP867X_DEBUG + 
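/* dump the PCI resources and iomap table to the kernel log */ + 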
atp867x_check_res(pdev); + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i, + (unsigned long long)(host->iomap[i])); +#endif + + /* + * request, iomap BARs and init port addresses accordingly + */ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct ata_ioports *ioaddr = &ap->ioaddr; + + ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i); + ioaddr->ctl_addr = ioaddr->altstatus_addr + = ATP867X_IO_ALTSTATUS(ap, i); + ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i); + + ata_sff_std_ports(ioaddr); + rc = atp867x_set_priv(ap); + if (rc) + return rc; + +#ifdef ATP867X_DEBUG + atp867x_check_ports(ap, i); +#endif + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", + (unsigned long)ioaddr->cmd_addr, + (unsigned long)ioaddr->ctl_addr); + ata_port_desc(ap, "bmdma 0x%lx", + (unsigned long)ioaddr->bmdma_addr); + + mask |= 1 << i; + } + + if (!mask) { + dev_err(gdev, "no available native port\n"); + return -ENODEV; + } + + atp867x_fixup(host); + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + return rc; +} + +static int atp867x_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct ata_port_info info_867x = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &atp867x_ops, + }; + + struct ata_host *host; + const struct ata_port_info *ppi[] = { &info_867x, NULL }; + int rc; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)", + pdev->device); + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS); + if (!host) { + dev_err(&pdev->dev, "failed to allocate ATA host\n"); + rc = -ENOMEM; + goto err_out; + } + + rc = atp867x_ata_pci_sff_init_host(host); + if (rc) { + dev_err(&pdev->dev, "failed to init host\n"); + goto err_out; + } + + pci_set_master(pdev); + + rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &atp867x_sht); + if (rc) + dev_err(&pdev->dev, "failed to activate host\n"); + +err_out: + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static int atp867x_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + atp867x_fixup(host); + + ata_host_resume(host); + return 0; +} +#endif + +static struct pci_device_id atp867x_pci_tbl[] = { + { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 }, + { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 }, + { }, +}; + +static struct pci_driver atp867x_driver = { + .name = DRV_NAME, + .id_table = atp867x_pci_tbl, + .probe = atp867x_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = atp867x_reinit_one, +#endif +}; + +module_pci_driver(atp867x_driver); + +MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc."); +MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c new file mode 100644 index 00000000000..03f2f2bc83b --- /dev/null +++ b/drivers/ata/pata_bf54x.c @@ -0,0 +1,1705 @@ +/* + * File: drivers/ata/pata_bf54x.c + * Author: Sonic Zhang <sonic.zhang@analog.com> + * + * Created: + * Description: PATA Driver for blackfin 54x + * + * 
Modified: + * Copyright 2007 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <asm/dma.h> +#include <asm/gpio.h> +#include <asm/portmux.h> + +#define DRV_NAME "pata-bf54x" +#define DRV_VERSION "0.9" + +#define ATA_REG_CTRL 0x0E +#define ATA_REG_ALTSTATUS ATA_REG_CTRL + +/* These are the offset of the controller's registers */ +#define ATAPI_OFFSET_CONTROL 0x00 +#define ATAPI_OFFSET_STATUS 0x04 +#define ATAPI_OFFSET_DEV_ADDR 0x08 +#define ATAPI_OFFSET_DEV_TXBUF 0x0c +#define ATAPI_OFFSET_DEV_RXBUF 0x10 +#define ATAPI_OFFSET_INT_MASK 0x14 +#define ATAPI_OFFSET_INT_STATUS 0x18 +#define ATAPI_OFFSET_XFER_LEN 0x1c +#define ATAPI_OFFSET_LINE_STATUS 0x20 +#define ATAPI_OFFSET_SM_STATE 0x24 +#define ATAPI_OFFSET_TERMINATE 0x28 +#define ATAPI_OFFSET_PIO_TFRCNT 0x2c +#define ATAPI_OFFSET_DMA_TFRCNT 0x30 +#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34 +#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38 +#define ATAPI_OFFSET_REG_TIM_0 0x40 +#define ATAPI_OFFSET_PIO_TIM_0 0x44 +#define ATAPI_OFFSET_PIO_TIM_1 0x48 +#define ATAPI_OFFSET_MULTI_TIM_0 0x50 +#define ATAPI_OFFSET_MULTI_TIM_1 0x54 +#define ATAPI_OFFSET_MULTI_TIM_2 0x58 +#define ATAPI_OFFSET_ULTRA_TIM_0 0x60 +#define ATAPI_OFFSET_ULTRA_TIM_1 0x64 +#define ATAPI_OFFSET_ULTRA_TIM_2 0x68 +#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c + + +#define ATAPI_GET_CONTROL(base)\ + bfin_read16(base + ATAPI_OFFSET_CONTROL) +#define ATAPI_SET_CONTROL(base, val)\ + bfin_write16(base + ATAPI_OFFSET_CONTROL, val) +#define ATAPI_GET_STATUS(base)\ + bfin_read16(base + ATAPI_OFFSET_STATUS) +#define ATAPI_GET_DEV_ADDR(base)\ + bfin_read16(base + ATAPI_OFFSET_DEV_ADDR) +#define ATAPI_SET_DEV_ADDR(base, val)\ + bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val) +#define ATAPI_GET_DEV_TXBUF(base)\ + bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF) +#define ATAPI_SET_DEV_TXBUF(base, val)\ + bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val) +#define ATAPI_GET_DEV_RXBUF(base)\ + bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF) +#define ATAPI_SET_DEV_RXBUF(base, val)\ + bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val) +#define ATAPI_GET_INT_MASK(base)\ + bfin_read16(base + ATAPI_OFFSET_INT_MASK) +#define ATAPI_SET_INT_MASK(base, val)\ + bfin_write16(base + ATAPI_OFFSET_INT_MASK, val) +#define ATAPI_GET_INT_STATUS(base)\ + bfin_read16(base + ATAPI_OFFSET_INT_STATUS) +#define ATAPI_SET_INT_STATUS(base, val)\ + bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val) +#define ATAPI_GET_XFER_LEN(base)\ + bfin_read16(base + ATAPI_OFFSET_XFER_LEN) +#define 
ATAPI_SET_XFER_LEN(base, val)\ + bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val) +#define ATAPI_GET_LINE_STATUS(base)\ + bfin_read16(base + ATAPI_OFFSET_LINE_STATUS) +#define ATAPI_GET_SM_STATE(base)\ + bfin_read16(base + ATAPI_OFFSET_SM_STATE) +#define ATAPI_GET_TERMINATE(base)\ + bfin_read16(base + ATAPI_OFFSET_TERMINATE) +#define ATAPI_SET_TERMINATE(base, val)\ + bfin_write16(base + ATAPI_OFFSET_TERMINATE, val) +#define ATAPI_GET_PIO_TFRCNT(base)\ + bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT) +#define ATAPI_GET_DMA_TFRCNT(base)\ + bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT) +#define ATAPI_GET_UMAIN_TFRCNT(base)\ + bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT) +#define ATAPI_GET_UDMAOUT_TFRCNT(base)\ + bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT) +#define ATAPI_GET_REG_TIM_0(base)\ + bfin_read16(base + ATAPI_OFFSET_REG_TIM_0) +#define ATAPI_SET_REG_TIM_0(base, val)\ + bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val) +#define ATAPI_GET_PIO_TIM_0(base)\ + bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0) +#define ATAPI_SET_PIO_TIM_0(base, val)\ + bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val) +#define ATAPI_GET_PIO_TIM_1(base)\ + bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1) +#define ATAPI_SET_PIO_TIM_1(base, val)\ + bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val) +#define ATAPI_GET_MULTI_TIM_0(base)\ + bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0) +#define ATAPI_SET_MULTI_TIM_0(base, val)\ + bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val) +#define ATAPI_GET_MULTI_TIM_1(base)\ + bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1) +#define ATAPI_SET_MULTI_TIM_1(base, val)\ + bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val) +#define ATAPI_GET_MULTI_TIM_2(base)\ + bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2) +#define ATAPI_SET_MULTI_TIM_2(base, val)\ + bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val) +#define ATAPI_GET_ULTRA_TIM_0(base)\ + bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0) +#define ATAPI_SET_ULTRA_TIM_0(base, val)\ + bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val) +#define ATAPI_GET_ULTRA_TIM_1(base)\ + bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1) +#define ATAPI_SET_ULTRA_TIM_1(base, val)\ + bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val) +#define ATAPI_GET_ULTRA_TIM_2(base)\ + bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2) +#define ATAPI_SET_ULTRA_TIM_2(base, val)\ + bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val) +#define ATAPI_GET_ULTRA_TIM_3(base)\ + bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3) +#define ATAPI_SET_ULTRA_TIM_3(base, val)\ + bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val) + +/** + * PIO Mode - Frequency compatibility + */ +/* mode: 0 1 2 3 4 */ +static const u32 pio_fsclk[] = +{ 33333333, 33333333, 33333333, 33333333, 33333333 }; + +/** + * MDMA Mode - Frequency compatibility + */ +/* mode: 0 1 2 */ +static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 }; + +/** + * UDMA Mode - Frequency compatibility + * + * UDMA5 - 100 MB/s - SCLK = 133 MHz + * UDMA4 - 66 MB/s - SCLK >= 80 MHz + * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz + * UDMA2 - 33 MB/s - SCLK >= 40 MHz + */ +/* mode: 0 1 2 3 4 5 */ +static const u32 udma_fsclk[] = +{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 }; + +/** + * Register transfer timing table + */ +/* mode: 0 1 2 3 4 */ +/* Cycle Time */ +static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 }; +/* DIOR/DIOW to end cycle */ +static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 }; +/* DIOR/DIOW asserted pulse width */ +static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 }; + +/** + * PIO timing table + */ +/* 
mode: 0 1 2 3 4 */ +/* Cycle Time */ +static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 }; +/* Address valid to DIOR/DIORW */ +static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 }; +/* DIOR/DIOW to end cycle */ +static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 }; +/* DIOR/DIOW asserted pulse width */ +static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 }; +/* DIOW data hold */ +static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 }; + +/* ****************************************************************** + * Multiword DMA timing table + * ****************************************************************** + */ +/* mode: 0 1 2 */ +/* Cycle Time */ +static const u32 mdma_t0min[] = { 480, 150, 120 }; +/* DIOR/DIOW asserted pulse width */ +static const u32 mdma_tdmin[] = { 215, 80, 70 }; +/* DIOW data hold */ +static const u32 mdma_thmin[] = { 20, 15, 10 }; +/* DIOR/DIOW to DMACK hold */ +static const u32 mdma_tjmin[] = { 20, 5, 5 }; +/* DIOR negated pulse width */ +static const u32 mdma_tkrmin[] = { 50, 50, 25 }; +/* DIOW negated pulse width */ +static const u32 mdma_tkwmin[] = { 215, 50, 25 }; +/* CS[1:0] valid to DIOR/DIOW */ +static const u32 mdma_tmmin[] = { 50, 30, 25 }; +/* DMACK to read data released */ +static const u32 mdma_tzmax[] = { 20, 25, 25 }; + +/** + * Ultra DMA timing table + */ +/* mode: 0 1 2 3 4 5 */ +static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 }; +static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 }; +static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 }; +static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 }; +static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 }; + + +static const u32 udma_tmlimin = 20; +static const u32 udma_tzahmin = 20; +static const u32 udma_tenvmin = 20; +static const u32 udma_tackmin = 20; +static const u32 udma_tssmin = 50; + +#define BFIN_MAX_SG_SEGMENTS 4 + +/** + * + * Function: num_clocks_min + * + * Description: + * calculate number of SCLK cycles to meet minimum timing + */ +static unsigned short num_clocks_min(unsigned long tmin, + unsigned long fsclk) +{ + unsigned long tmp; + unsigned short result; + + tmp = tmin * (fsclk/1000/1000) / 1000; + result = (unsigned short)tmp; + if ((tmp*1000*1000) < (tmin*(fsclk/1000))) { + result++; + } + + return result; +} + +/** + * bfin_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: device whose timings we are configuring + * + * Set PIO mode for device. + * + * LOCKING: + * None (inherited from caller). + */ + +static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + int mode = adev->pio_mode - XFER_PIO_0; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned int fsclk = get_sclk(); + unsigned short teoc_reg, t2_reg, teoc_pio; + unsigned short t4_reg, t2_pio, t1_reg; + unsigned short n0, n6, t6min = 5; + + /* the most restrictive timing value is t6 and tc, the DIOW - data hold. + * If one SCLK pulse is longer than this minimum value then register + * transfers cannot be supported at this frequency. + */ + n6 = num_clocks_min(t6min, fsclk); + if (mode >= 0 && mode <= 4 && n6 >= 1) { + dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%u\n", mode, fsclk); + /* calculate the timing values for register transfers. 
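Each minimum time from the mode tables is rounded up to a whole number of SCLK cycles by num_clocks_min(); e.g. the 600 ns mode-0 cycle time at a 133 MHz SCLK comes out as 80 clocks.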
*/ + while (mode > 0 && pio_fsclk[mode] > fsclk) + mode--; + + /* DIOR/DIOW to end cycle time */ + t2_reg = num_clocks_min(reg_t2min[mode], fsclk); + /* DIOR/DIOW asserted pulse width */ + teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk); + /* Cycle Time */ + n0 = num_clocks_min(reg_t0min[mode], fsclk); + + /* increase t2 until we meet the minimum cycle length */ + if (t2_reg + teoc_reg < n0) + t2_reg = n0 - teoc_reg; + + /* calculate the timing values for pio transfers. */ + + /* DIOR/DIOW to end cycle time */ + t2_pio = num_clocks_min(pio_t2min[mode], fsclk); + /* DIOR/DIOW asserted pulse width */ + teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk); + /* Cycle Time */ + n0 = num_clocks_min(pio_t0min[mode], fsclk); + + /* increase t2 until we meet the minimum cycle length */ + if (t2_pio + teoc_pio < n0) + t2_pio = n0 - teoc_pio; + + /* Address valid to DIOR/DIORW */ + t1_reg = num_clocks_min(pio_t1min[mode], fsclk); + + /* DIOW data hold */ + t4_reg = num_clocks_min(pio_t4min[mode], fsclk); + + ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg)); + ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg)); + ATAPI_SET_PIO_TIM_1(base, teoc_pio); + if (mode > 2) { + ATAPI_SET_CONTROL(base, + ATAPI_GET_CONTROL(base) | IORDY_EN); + } else { + ATAPI_SET_CONTROL(base, + ATAPI_GET_CONTROL(base) & ~IORDY_EN); + } + + /* Disable host ATAPI PIO interrupts */ + ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base) + & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK)); + SSYNC(); + } +} + +/** + * bfin_set_dmamode - Initialize host controller PATA DMA timings + * @ap: Port whose timings we are configuring + * @adev: device whose timings we are configuring + * + * Set UDMA mode for device. + * + * LOCKING: + * None (inherited from caller). + */ + +static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + int mode; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned long fsclk = get_sclk(); + unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah; + unsigned short tm, td, tkr, tkw, teoc, th; + unsigned short n0, nf, tfmin = 5; + unsigned short nmin, tcyc; + + mode = adev->dma_mode - XFER_UDMA_0; + if (mode >= 0 && mode <= 5) { + dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode); + /* the most restrictive timing value is t6 and tc, + * the DIOW - data hold. If one SCLK pulse is longer + * than this minimum value then register + * transfers cannot be supported at this frequency. + */ + while (mode > 0 && udma_fsclk[mode] > fsclk) + mode--; + + nmin = num_clocks_min(udma_tmin[mode], fsclk); + if (nmin >= 1) { + /* calculate the timing values for Ultra DMA. 
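tdvs and tcyc below come from the per-mode minimum tables, and tcyc_tdvs is then widened until tdvs + tcyc_tdvs covers the full cycle.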
*/ + tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk); + tcyc = num_clocks_min(udma_tcycmin[mode], fsclk); + tcyc_tdvs = 2; + + /* increase tcyc - tdvs (tcyc_tdvs) until we meet + * the minimum cycle length + */ + if (tdvs + tcyc_tdvs < tcyc) + tcyc_tdvs = tcyc - tdvs; + + /* Now assign the values required for the timing + * registers + */ + if (tcyc_tdvs < 2) + tcyc_tdvs = 2; + + if (tdvs < 2) + tdvs = 2; + + tack = num_clocks_min(udma_tackmin, fsclk); + tss = num_clocks_min(udma_tssmin, fsclk); + tmli = num_clocks_min(udma_tmlimin, fsclk); + tzah = num_clocks_min(udma_tzahmin, fsclk); + trp = num_clocks_min(udma_trpmin[mode], fsclk); + tenv = num_clocks_min(udma_tenvmin, fsclk); + if (tenv <= udma_tenvmax[mode]) { + ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack)); + ATAPI_SET_ULTRA_TIM_1(base, + (tcyc_tdvs<<8 | tdvs)); + ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss)); + ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah)); + } + } + } + + mode = adev->dma_mode - XFER_MW_DMA_0; + if (mode >= 0 && mode <= 2) { + dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode); + /* the most restrictive timing value is tf, the DMACK to + * read data released. If one SCLK pulse is longer than + * this maximum value then the MDMA mode + * cannot be supported at this frequency. + */ + while (mode > 0 && mdma_fsclk[mode] > fsclk) + mode--; + + nf = num_clocks_min(tfmin, fsclk); + if (nf >= 1) { + /* calculate the timing values for Multi-word DMA. */ + + /* DIOR/DIOW asserted pulse width */ + td = num_clocks_min(mdma_tdmin[mode], fsclk); + + /* DIOW negated pulse width */ + tkw = num_clocks_min(mdma_tkwmin[mode], fsclk); + + /* Cycle Time */ + n0 = num_clocks_min(mdma_t0min[mode], fsclk); + + /* increase tk until we meet the minimum cycle length */ + if (tkw + td < n0) + tkw = n0 - td; + + /* DIOR negated pulse width - read */ + tkr = num_clocks_min(mdma_tkrmin[mode], fsclk); + /* CS[1:0] valid to DIOR/DIOW */ + tm = num_clocks_min(mdma_tmmin[mode], fsclk); + /* DIOR/DIOW to DMACK hold */ + teoc = num_clocks_min(mdma_tjmin[mode], fsclk); + /* DIOW Data hold */ + th = num_clocks_min(mdma_thmin[mode], fsclk); + + ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td)); + ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw)); + ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th)); + SSYNC(); + } + } + return; +} + +/** + * + * Function: wait_complete + * + * Description: Waits for the interrupt from the device + * + */ +static inline void wait_complete(void __iomem *base, unsigned short mask) +{ + unsigned short status; + unsigned int i = 0; + +#define PATA_BF54X_WAIT_TIMEOUT 10000 + + for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) { + status = ATAPI_GET_INT_STATUS(base) & mask; + if (status) + break; + } + + ATAPI_SET_INT_STATUS(base, mask); +} + +/** + * + * Function: write_atapi_register + * + * Description: Writes to ATA Device Register + * + */ + +static void write_atapi_register(void __iomem *base, + unsigned long ata_reg, unsigned short value) +{ + /* Program the ATA_DEV_TXBUF register with write data (to be + * written into the device). + */ + ATAPI_SET_DEV_TXBUF(base, value); + + /* Program the ATA_DEV_ADDR register with address of the + * device register (0x01 to 0x0F). 
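+ * (the write data was already loaded into ATA_DEV_TXBUF above)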
+ */ + ATAPI_SET_DEV_ADDR(base, ata_reg); + + /* Program the ATA_CTRL register with dir set to write (1) + */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); + + /* ensure PIO DMA is not set */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); + + /* and start the transfer */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); + + /* Wait for the interrupt to indicate the end of the transfer. + * (We need to wait on and clear the ATA_DEV_INT interrupt status) + */ + wait_complete(base, PIO_DONE_INT); +} + +/** + * + * Function: read_atapi_register + * + * Description: Reads from ATA Device Register + * + */ + +static unsigned short read_atapi_register(void __iomem *base, + unsigned long ata_reg) +{ + /* Program the ATA_DEV_ADDR register with address of the + * device register (0x01 to 0x0F). + */ + ATAPI_SET_DEV_ADDR(base, ata_reg); + + /* Program the ATA_CTRL register with dir set to read (0) + */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); + + /* ensure PIO DMA is not set */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); + + /* and start the transfer */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); + + /* Wait for the interrupt to indicate the end of the transfer. + * (PIO_DONE interrupt is set and it doesn't seem to matter + * that we don't clear it) + */ + wait_complete(base, PIO_DONE_INT); + + /* Read the ATA_DEV_RXBUF register to get the data returned + * by the device. + */ + return ATAPI_GET_DEV_RXBUF(base); +} + +/** + * + * Function: write_atapi_data + * + * Description: Writes to ATA Device Register + * + */ + +static void write_atapi_data(void __iomem *base, + int len, unsigned short *buf) +{ + int i; + + /* Set transfer length to 1 */ + ATAPI_SET_XFER_LEN(base, 1); + + /* Program the ATA_DEV_ADDR register with address of the + * ATA_REG_DATA + */ + ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); + + /* Program the ATA_CTRL register with dir set to write (1) + */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); + + /* ensure PIO DMA is not set */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); + + for (i = 0; i < len; i++) { + /* Program the ATA_DEV_TXBUF register with write data (to be + * written into the device). + */ + ATAPI_SET_DEV_TXBUF(base, buf[i]); + + /* and start the transfer */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); + + /* Wait for the interrupt to indicate the end of the transfer. + * (We need to wait on and clear the ATA_DEV_INT + * interrupt status) + */ + wait_complete(base, PIO_DONE_INT); + } +} + +/** + * + * Function: read_atapi_data + * + * Description: Reads from ATA Device Register + * + */ + +static void read_atapi_data(void __iomem *base, + int len, unsigned short *buf) +{ + int i; + + /* Set transfer length to 1 */ + ATAPI_SET_XFER_LEN(base, 1); + + /* Program the ATA_DEV_ADDR register with address of the + * ATA_REG_DATA + */ + ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA); + + /* Program the ATA_CTRL register with dir set to read (0) + */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); + + /* ensure PIO DMA is not set */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA)); + + for (i = 0; i < len; i++) { + /* and start the transfer */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START)); + + /* Wait for the interrupt to indicate the end of the transfer. 
+ * (PIO_DONE interrupt is set and it doesn't seem to matter + * that we don't clear it) + */ + wait_complete(base, PIO_DONE_INT); + + /* Read the ATA_DEV_RXBUF register to get the data returned + * by the device. + */ + buf[i] = ATAPI_GET_DEV_RXBUF(base); + } +} + +/** + * bfin_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Note: Original code is ata_sff_tf_load(). + */ + +static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + write_atapi_register(base, ATA_REG_CTRL, tf->ctl); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr) { + if (tf->flags & ATA_TFLAG_LBA48) { + write_atapi_register(base, ATA_REG_FEATURE, + tf->hob_feature); + write_atapi_register(base, ATA_REG_NSECT, + tf->hob_nsect); + write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal); + write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam); + write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah); + dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X " + "0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + write_atapi_register(base, ATA_REG_FEATURE, tf->feature); + write_atapi_register(base, ATA_REG_NSECT, tf->nsect); + write_atapi_register(base, ATA_REG_LBAL, tf->lbal); + write_atapi_register(base, ATA_REG_LBAM, tf->lbam); + write_atapi_register(base, ATA_REG_LBAH, tf->lbah); + dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + write_atapi_register(base, ATA_REG_DEVICE, tf->device); + dev_dbg(ap->dev, "device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * bfin_check_status - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Note: Original code is ata_check_status(). + */ + +static u8 bfin_check_status(struct ata_port *ap) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + return read_atapi_register(base, ATA_REG_STATUS); +} + +/** + * bfin_tf_read - input device's ATA taskfile shadow registers + * @ap: Port from which input is read + * @tf: ATA taskfile register set for storing input + * + * Note: Original code is ata_sff_tf_read(). 
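+ * + * Reads the shadow registers through the ATAPI register interface + * rather than through memory-mapped taskfile registers.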
+ */ + +static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + tf->command = bfin_check_status(ap); + tf->feature = read_atapi_register(base, ATA_REG_ERR); + tf->nsect = read_atapi_register(base, ATA_REG_NSECT); + tf->lbal = read_atapi_register(base, ATA_REG_LBAL); + tf->lbam = read_atapi_register(base, ATA_REG_LBAM); + tf->lbah = read_atapi_register(base, ATA_REG_LBAH); + tf->device = read_atapi_register(base, ATA_REG_DEVICE); + + if (tf->flags & ATA_TFLAG_LBA48) { + write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB); + tf->hob_feature = read_atapi_register(base, ATA_REG_ERR); + tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT); + tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL); + tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM); + tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH); + } +} + +/** + * bfin_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Note: Original code is ata_sff_exec_command(). + */ + +static void bfin_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command); + + write_atapi_register(base, ATA_REG_CMD, tf->command); + ata_sff_pause(ap); +} + +/** + * bfin_check_altstatus - Read device alternate status reg + * @ap: port where the device is + */ + +static u8 bfin_check_altstatus(struct ata_port *ap) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + return read_atapi_register(base, ATA_REG_ALTSTATUS); +} + +/** + * bfin_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * + * Note: Original code is ata_sff_dev_select(). + */ + +static void bfin_dev_select(struct ata_port *ap, unsigned int device) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + write_atapi_register(base, ATA_REG_DEVICE, tmp); + ata_sff_pause(ap); +} + +/** + * bfin_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + */ + +static void bfin_set_devctl(struct ata_port *ap, u8 ctl) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + write_atapi_register(base, ATA_REG_CTRL, ctl); +} + +/** + * bfin_bmdma_setup - Set up IDE DMA transaction + * @qc: Info associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_setup(). 
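+ * + * Builds a descriptor-array transfer over the command's scatterlist + * and programs the ATAPI controller; the transfer itself is started + * later by bfin_bmdma_start().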
+ */ + +static void bfin_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN; + struct scatterlist *sg; + unsigned int si; + unsigned int channel; + unsigned int dir; + unsigned int size = 0; + + dev_dbg(qc->ap->dev, "in atapi dma setup\n"); + /* Program the ATA_CTRL register with dir */ + if (qc->tf.flags & ATA_TFLAG_WRITE) { + channel = CH_ATAPI_TX; + dir = DMA_TO_DEVICE; + } else { + channel = CH_ATAPI_RX; + dir = DMA_FROM_DEVICE; + config |= WNR; + } + + dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir); + + /* fill the ATAPI DMA controller */ + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_desc_cpu[si].start_addr = sg_dma_address(sg); + dma_desc_cpu[si].cfg = config; + dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1; + dma_desc_cpu[si].x_modify = 2; + size += sg_dma_len(sg); + } + + /* Set the last descriptor to stop mode */ + dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE); + + flush_dcache_range((unsigned int)dma_desc_cpu, + (unsigned int)dma_desc_cpu + + qc->n_elem * sizeof(struct dma_desc_array)); + + /* Enable ATA DMA operation */ + set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma); + set_dma_x_count(channel, 0); + set_dma_x_modify(channel, 0); + set_dma_config(channel, config); + + SSYNC(); + + /* Send ATA DMA command */ + bfin_exec_command(ap, &qc->tf); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + /* set ATA DMA write direction */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) + | XFER_DIR)); + } else { + /* set ATA DMA read direction */ + ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) + & ~XFER_DIR)); + } + + /* Reset all transfer counts */ + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST); + + /* Set ATAPI state machine control in terminate sequence */ + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); + + /* Set transfer length to the total size of sg buffers */ + ATAPI_SET_XFER_LEN(base, size >> 1); +} + +/** + * bfin_bmdma_start - Start an IDE DMA transaction + * @qc: Info associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_start(). + */ + +static void bfin_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + dev_dbg(qc->ap->dev, "in atapi dma start\n"); + + if (!(ap->udma_mask || ap->mwdma_mask)) + return; + + /* start ATAPI transfer */ + if (ap->udma_mask) + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) + | ULTRA_START); + else + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) + | MULTI_START); +} + +/** + * bfin_bmdma_stop - Stop IDE DMA transfer + * @qc: Command we are ending DMA for + */ + +static void bfin_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int dir; + + dev_dbg(qc->ap->dev, "in atapi dma stop\n"); + + if (!(ap->udma_mask || ap->mwdma_mask)) + return; + + /* stop ATAPI DMA controller */ + if (qc->tf.flags & ATA_TFLAG_WRITE) { + dir = DMA_TO_DEVICE; + disable_dma(CH_ATAPI_TX); + } else { + dir = DMA_FROM_DEVICE; + disable_dma(CH_ATAPI_RX); + } + + dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir); +} + +/** + * bfin_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * Note: Original code is ata_devchk(). 
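+ * + * Writes the classic 0x55/0xaa test patterns to the NSECT/LBAL + * registers and reads them back to decide whether a device is there.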
+ */ + +static unsigned int bfin_devchk(struct ata_port *ap, + unsigned int device) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + u8 nsect, lbal; + + bfin_dev_select(ap, device); + + write_atapi_register(base, ATA_REG_NSECT, 0x55); + write_atapi_register(base, ATA_REG_LBAL, 0xaa); + + write_atapi_register(base, ATA_REG_NSECT, 0xaa); + write_atapi_register(base, ATA_REG_LBAL, 0x55); + + write_atapi_register(base, ATA_REG_NSECT, 0x55); + write_atapi_register(base, ATA_REG_LBAL, 0xaa); + + nsect = read_atapi_register(base, ATA_REG_NSECT); + lbal = read_atapi_register(base, ATA_REG_LBAL); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * bfin_bus_post_reset - PATA device post reset + * + * Note: Original code is ata_bus_post_reset(). + */ + +static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + unsigned long deadline; + + /* if device 0 was found in ata_devchk, wait for its + * BSY bit to clear + */ + if (dev0) + ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* if device 1 was found in ata_devchk, wait for + * register access, then wait for BSY to clear + */ + deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); + while (dev1) { + u8 nsect, lbal; + + bfin_dev_select(ap, 1); + nsect = read_atapi_register(base, ATA_REG_NSECT); + lbal = read_atapi_register(base, ATA_REG_LBAL); + if ((nsect == 1) && (lbal == 1)) + break; + if (time_after(jiffies, deadline)) { + dev1 = 0; + break; + } + ata_msleep(ap, 50); /* give drive a breather */ + } + if (dev1) + ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* is all this really necessary? */ + bfin_dev_select(ap, 0); + if (dev1) + bfin_dev_select(ap, 1); + if (dev0) + bfin_dev_select(ap, 0); +} + +/** + * bfin_bus_softreset - PATA device software reset + * + * Note: Original code is ata_bus_softreset(). + */ + +static unsigned int bfin_bus_softreset(struct ata_port *ap, + unsigned int devmask) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + /* software reset. causes dev0 to be selected */ + write_atapi_register(base, ATA_REG_CTRL, ap->ctl); + udelay(20); + write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST); + udelay(20); + write_atapi_register(base, ATA_REG_CTRL, ap->ctl); + + /* spec mandates ">= 2ms" before checking status. + * We wait 150ms, because that was the magic delay used for + * ATAPI devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. + * + * Old drivers/ide uses the 2mS rule and then waits for ready + */ + ata_msleep(ap, 150); + + /* Before we perform post reset processing we want to see if + * the bus shows 0xFF because the odd clown forgets the D7 + * pulldown resistor. + */ + if (bfin_check_status(ap) == 0xFF) + return 0; + + bfin_bus_post_reset(ap, devmask); + + return 0; +} + +/** + * bfin_softreset - reset host port via ATA SRST + * @link: ATA link to reset + * @classes: resulting classes of attached devices + * @deadline: deadline jiffies for the operation + * + * Note: Original code is ata_sff_softreset(). 
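+ * + * Detects attached devices with bfin_devchk(), pulses SRST via + * bfin_bus_softreset() and then classifies whatever responded.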
+ */ + +static int bfin_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + unsigned int devmask = 0, err_mask; + u8 err; + + /* determine if device 0/1 are present */ + if (bfin_devchk(ap, 0)) + devmask |= (1 << 0); + if (slave_possible && bfin_devchk(ap, 1)) + devmask |= (1 << 1); + + /* select device 0 again */ + bfin_dev_select(ap, 0); + + /* issue bus reset */ + err_mask = bfin_bus_softreset(ap, devmask); + if (err_mask) { + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", + err_mask); + return -EIO; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&ap->link.device[0], + devmask & (1 << 0), &err); + if (slave_possible && err != 0x81) + classes[1] = ata_sff_dev_classify(&ap->link.device[1], + devmask & (1 << 1), &err); + + return 0; +} + +/** + * bfin_bmdma_status - Read IDE DMA status + * @ap: Port associated with this ATA transaction. + */ + +static unsigned char bfin_bmdma_status(struct ata_port *ap) +{ + unsigned char host_stat = 0; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON)) + host_stat |= ATA_DMA_ACTIVE; + if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT) + host_stat |= ATA_DMA_INTR; + + dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat); + + return host_stat; +} + +/** + * bfin_data_xfer - Transfer data by PIO + * @adev: device for this I/O + * @buf: data buffer + * @buflen: buffer length + * @write_data: read/write + * + * Note: Original code is ata_sff_data_xfer(). + */ + +static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned int words = buflen >> 1; + unsigned short *buf16 = (u16 *)buf; + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + read_atapi_data(base, words, buf16); + else + write_atapi_data(base, words, buf16); + + /* Transfer trailing 1 byte, if any. */ + if (unlikely(buflen & 0x01)) { + unsigned short align_buf[1] = { 0 }; + unsigned char *trailing_buf = buf + buflen - 1; + + if (rw == READ) { + read_atapi_data(base, 1, align_buf); + memcpy(trailing_buf, align_buf, 1); + } else { + memcpy(align_buf, trailing_buf, 1); + write_atapi_data(base, 1, align_buf); + } + words++; + } + + return words << 1; +} + +/** + * bfin_irq_clear - Clear ATAPI interrupt. + * @ap: Port associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_irq_clear(). + */ + +static void bfin_irq_clear(struct ata_port *ap) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + dev_dbg(ap->dev, "in atapi irq clear\n"); + ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT + | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT + | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT); +} + +/** + * bfin_thaw - Thaw DMA controller port + * @ap: port to thaw + * + * Note: Original code is ata_sff_thaw(). + */ + +void bfin_thaw(struct ata_port *ap) +{ + dev_dbg(ap->dev, "in atapi dma thaw\n"); + bfin_check_status(ap); + ata_sff_irq_on(ap); +} + +/** + * bfin_postreset - standard postreset callback + * @ap: the target ata_port + * @classes: classes of attached devices + * + * Note: Original code is ata_sff_postreset(). 
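Note: bfin_data_xfer() above applies the standard SFF trick for odd-length PIO transfers; the bulk of the buffer moves as 16-bit words and the trailing byte is bounced through a two-byte aligned buffer so the data port only ever sees word accesses. The write path, condensed:

	if (buflen & 1) {
		unsigned short pad = 0;
		memcpy(&pad, buf + buflen - 1, 1);	/* trailing byte in the low half */
		write_atapi_data(base, 1, &pad);
	}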
+ */ + +static void bfin_postreset(struct ata_link *link, unsigned int *classes) +{ + struct ata_port *ap = link->ap; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + /* re-enable interrupts */ + ata_sff_irq_on(ap); + + /* is double-select really necessary? */ + if (classes[0] != ATA_DEV_NONE) + bfin_dev_select(ap, 1); + if (classes[1] != ATA_DEV_NONE) + bfin_dev_select(ap, 0); + + /* bail out if no device is present */ + if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { + return; + } + + /* set up device control */ + write_atapi_register(base, ATA_REG_CTRL, ap->ctl); +} + +static void bfin_port_stop(struct ata_port *ap) +{ + dev_dbg(ap->dev, "in atapi port stop\n"); + if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { + dma_free_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + ap->bmdma_prd, + ap->bmdma_prd_dma); + + free_dma(CH_ATAPI_RX); + free_dma(CH_ATAPI_TX); + } +} + +static int bfin_port_start(struct ata_port *ap) +{ + dev_dbg(ap->dev, "in atapi port start\n"); + if (!(ap->udma_mask || ap->mwdma_mask)) + return 0; + + ap->bmdma_prd = dma_alloc_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + &ap->bmdma_prd_dma, + GFP_KERNEL); + + if (ap->bmdma_prd == NULL) { + dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n"); + goto out; + } + + if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { + if (request_dma(CH_ATAPI_TX, + "BFIN ATAPI TX DMA") >= 0) + return 0; + + free_dma(CH_ATAPI_RX); + dma_free_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + ap->bmdma_prd, + ap->bmdma_prd_dma); + } + +out: + ap->udma_mask = 0; + ap->mwdma_mask = 0; + dev_err(ap->dev, "Unable to request ATAPI DMA!" + " Continue in PIO mode.\n"); + + return 0; +} + +static unsigned int bfin_ata_host_intr(struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 status, host_stat = 0; + + VPRINTK("ata%u: protocol %d task_state %d\n", + ap->print_id, qc->tf.protocol, ap->hsm_task_state); + + /* Check whether we are expecting interrupt in this state */ + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Some pre-ATAPI-4 devices assert INTRQ + * at this state when ready to receive CDB. + */ + + /* Check the ATA_DFLAG_CDB_INTR flag is enough here. + * The flag was turned on only for atapi devices. + * No need to check is_atapi_taskfile(&qc->tf) again. + */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + goto idle_irq; + break; + case HSM_ST_LAST: + if (qc->tf.protocol == ATA_PROT_DMA || + qc->tf.protocol == ATAPI_PROT_DMA) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", + ap->print_id, host_stat); + + /* if it's not our irq... 
*/ + if (!(host_stat & ATA_DMA_INTR)) + goto idle_irq; + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transferring data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + break; + case HSM_ST: + break; + default: + goto idle_irq; + } + + /* check altstatus */ + status = ap->ops->sff_check_altstatus(ap); + if (status & ATA_BUSY) + goto busy_ata; + + /* check main status, clearing INTRQ */ + status = ap->ops->sff_check_status(ap); + if (unlikely(status & ATA_BUSY)) + goto busy_ata; + + /* ack bmdma irq events */ + ap->ops->sff_irq_clear(ap); + + ata_sff_hsm_move(ap, qc, status, 0); + + if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || + qc->tf.protocol == ATAPI_PROT_DMA)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + +busy_ata: + return 1; /* irq handled */ + +idle_irq: + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->irq_ack(ap, 0); /* debug trap */ + ata_port_warn(ap, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ +} + +static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + + /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ + spin_lock_irqsave(&host->lock, flags); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) + handled |= bfin_ata_host_intr(ap, qc); + } + + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_RETVAL(handled); +} + + +static struct scsi_host_template bfin_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = BFIN_MAX_SG_SEGMENTS, + .dma_boundary = ATA_DMA_BOUNDARY, +}; + +static struct ata_port_operations bfin_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .set_piomode = bfin_set_piomode, + .set_dmamode = bfin_set_dmamode, + + .sff_tf_load = bfin_tf_load, + .sff_tf_read = bfin_tf_read, + .sff_exec_command = bfin_exec_command, + .sff_check_status = bfin_check_status, + .sff_check_altstatus = bfin_check_altstatus, + .sff_dev_select = bfin_dev_select, + .sff_set_devctl = bfin_set_devctl, + + .bmdma_setup = bfin_bmdma_setup, + .bmdma_start = bfin_bmdma_start, + .bmdma_stop = bfin_bmdma_stop, + .bmdma_status = bfin_bmdma_status, + .sff_data_xfer = bfin_data_xfer, + + .qc_prep = ata_noop_qc_prep, + + .thaw = bfin_thaw, + .softreset = bfin_softreset, + .postreset = bfin_postreset, + + .sff_irq_clear = bfin_irq_clear, + + .port_start = bfin_port_start, + .port_stop = bfin_port_stop, +}; + +static struct ata_port_info bfin_port_info[] = { + { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = 0, + .udma_mask = 0, + .port_ops = &bfin_pata_ops, + }, +}; + +/** + * bfin_reset_controller - initialize BF54x ATAPI controller. 
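Note: bfin_pata_ops above relies on libata's pseudo-inheritance; the table names only the hooks that differ from stock SFF/BMDMA behaviour, and everything else is resolved through .inherits when the port is registered. The minimal shape of such a table, with my_check_status as a hypothetical override:

	static struct ata_port_operations my_ops = {
		.inherits		= &ata_bmdma_port_ops,
		.sff_check_status	= my_check_status,
	};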
+ */ + +static int bfin_reset_controller(struct ata_host *host) +{ + void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr; + int count; + unsigned short status; + + /* Disable all ATAPI interrupts */ + ATAPI_SET_INT_MASK(base, 0); + SSYNC(); + + /* Assert the RESET signal 25us*/ + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST); + udelay(30); + + /* Negate the RESET signal for 2ms*/ + ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST); + msleep(2); + + /* Wait on Busy flag to clear */ + count = 10000000; + do { + status = read_atapi_register(base, ATA_REG_STATUS); + } while (--count && (status & ATA_BUSY)); + + /* Enable only ATAPI Device interrupt */ + ATAPI_SET_INT_MASK(base, 1); + SSYNC(); + + return (!count); +} + +/** + * atapi_io_port - define atapi peripheral port pins. + */ +static unsigned short atapi_io_port[] = { + P_ATAPI_RESET, + P_ATAPI_DIOR, + P_ATAPI_DIOW, + P_ATAPI_CS0, + P_ATAPI_CS1, + P_ATAPI_DMACK, + P_ATAPI_DMARQ, + P_ATAPI_INTRQ, + P_ATAPI_IORDY, + P_ATAPI_D0A, + P_ATAPI_D1A, + P_ATAPI_D2A, + P_ATAPI_D3A, + P_ATAPI_D4A, + P_ATAPI_D5A, + P_ATAPI_D6A, + P_ATAPI_D7A, + P_ATAPI_D8A, + P_ATAPI_D9A, + P_ATAPI_D10A, + P_ATAPI_D11A, + P_ATAPI_D12A, + P_ATAPI_D13A, + P_ATAPI_D14A, + P_ATAPI_D15A, + P_ATAPI_A0A, + P_ATAPI_A1A, + P_ATAPI_A2A, + 0 +}; + +/** + * bfin_atapi_probe - attach a bfin atapi interface + * @pdev: platform device + * + * Register a bfin atapi interface. + * + * + * Platform devices are expected to contain 2 resources per port: + * + * - I/O Base (IORESOURCE_IO) + * - IRQ (IORESOURCE_IRQ) + * + */ +static int bfin_atapi_probe(struct platform_device *pdev) +{ + int board_idx = 0; + struct resource *res; + struct ata_host *host; + unsigned int fsclk = get_sclk(); + int udma_mode = 5; + const struct ata_port_info *ppi[] = + { &bfin_port_info[board_idx], NULL }; + + /* + * Simple resource validation .. + */ + if (unlikely(pdev->num_resources != 2)) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + /* + * Get the register base first + */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -EINVAL; + + while (bfin_port_info[board_idx].udma_mask > 0 && + udma_fsclk[udma_mode] > fsclk) { + udma_mode--; + bfin_port_info[board_idx].udma_mask >>= 1; + } + + /* + * Now that that's out of the way, wire up the port.. + */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); + if (!host) + return -ENOMEM; + + host->ports[0]->ioaddr.ctl_addr = (void *)res->start; + + if (peripheral_request_list(atapi_io_port, "atapi-io-port")) { + dev_err(&pdev->dev, "Requesting Peripherals failed\n"); + return -EFAULT; + } + + if (bfin_reset_controller(host)) { + peripheral_free_list(atapi_io_port); + dev_err(&pdev->dev, "Fail to reset ATAPI device\n"); + return -EFAULT; + } + + if (ata_host_activate(host, platform_get_irq(pdev, 0), + bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) { + peripheral_free_list(atapi_io_port); + dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); + return -ENODEV; + } + + platform_set_drvdata(pdev, host); + + return 0; +} + +/** + * bfin_atapi_remove - unplug a bfin atapi interface + * @pdev: platform device + * + * A bfin atapi device has been unplugged. Perform the needed + * cleanup. Also called on module unload for any active devices. 
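Note: the probe above derates the advertised UDMA mask against the system clock. udma_mask is a bitmap with one bit per mode (ATA_UDMA5 covers modes 0..5), so each right shift retires the fastest remaining mode; udma_fsclk[] is the driver's table of minimum clock per mode. In effect:

	/* e.g. 0x3f (UDMA0-5) >> 1 == 0x1f (UDMA0-4) */
	while (udma_mask && udma_fsclk[udma_mode] > fsclk) {
		udma_mode--;
		udma_mask >>= 1;
	}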
+ */ +static int bfin_atapi_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + + ata_host_detach(host); + + peripheral_free_list(atapi_io_port); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ata_host *host = platform_get_drvdata(pdev); + if (host) + return ata_host_suspend(host, state); + else + return 0; +} + +static int bfin_atapi_resume(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + int ret; + + if (host) { + ret = bfin_reset_controller(host); + if (ret) { + printk(KERN_ERR DRV_NAME ": Error during HW init\n"); + return ret; + } + ata_host_resume(host); + } + + return 0; +} +#else +#define bfin_atapi_suspend NULL +#define bfin_atapi_resume NULL +#endif + +static struct platform_driver bfin_atapi_driver = { + .probe = bfin_atapi_probe, + .remove = bfin_atapi_remove, + .suspend = bfin_atapi_suspend, + .resume = bfin_atapi_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +#define ATAPI_MODE_SIZE 10 +static char bfin_atapi_mode[ATAPI_MODE_SIZE]; + +static int __init bfin_atapi_init(void) +{ + pr_info("register bfin atapi driver\n"); + + switch(bfin_atapi_mode[0]) { + case 'p': + case 'P': + break; + case 'm': + case 'M': + bfin_port_info[0].mwdma_mask = ATA_MWDMA2; + break; + default: + bfin_port_info[0].udma_mask = ATA_UDMA5; + }; + + return platform_driver_register(&bfin_atapi_driver); +} + +static void __exit bfin_atapi_exit(void) +{ + platform_driver_unregister(&bfin_atapi_driver); +} + +module_init(bfin_atapi_init); +module_exit(bfin_atapi_exit); +/* + * ATAPI mode: + * pio/PIO + * udma/UDMA (default) + * mwdma/MWDMA + */ +module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0); + +MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); +MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c new file mode 100644 index 00000000000..c47caa807fa --- /dev/null +++ b/drivers/ata/pata_cmd640.c @@ -0,0 +1,271 @@ +/* + * pata_cmd640.c - CMD640 PCI PATA for new ATA layer + * (C) 2007 Red Hat Inc + * + * Based upon + * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996 + * + * Copyright (C) 1995-1996 Linus Torvalds & authors (see driver) + * + * This drives only the PCI version of the controller. If you have a + * VLB one then we have enough docs to support it but you can write + * your own code. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> + +#define DRV_NAME "pata_cmd640" +#define DRV_VERSION "0.0.5" + +struct cmd640_reg { + int last; + u8 reg58[ATA_MAX_DEVICES]; +}; + +enum { + CFR = 0x50, + CNTRL = 0x51, + CMDTIM = 0x52, + ARTIM0 = 0x53, + DRWTIM0 = 0x54, + ARTIM23 = 0x57, + DRWTIM23 = 0x58, + BRST = 0x59 +}; + +/** + * cmd640_set_piomode - set initial PIO mode data + * @ap: ATA port + * @adev: ATA device + * + * Called to do the PIO mode setup. 
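Note: the CMD640 packs the computed clocks into 4-bit fields, as cmd640_set_piomode() below shows; address setup lands in the top bits of ARTIM0/ARTIM23, while the active and recovery counts share one DRWTIM byte, with a field value of 0 standing for 16 clocks:

	reg = (t.active << 4) | t.recover;	/* 4 bits each, 0 == 16 clocks */
	pci_write_config_byte(pdev, arttim + 1, reg);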
+ */ + +static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct cmd640_reg *timing = ap->private_data; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct ata_timing t; + const unsigned long T = 1000000 / 33; + const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 }; + u8 reg; + int arttim = ARTIM0 + 2 * adev->devno; + struct ata_device *pair = ata_dev_pair(adev); + + if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { + printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); + return; + } + + /* The second channel has shared timings and the setup timing is + messy to switch to merge it for worst case */ + if (ap->port_no && pair) { + struct ata_timing p; + ata_timing_compute(pair, pair->pio_mode, &p, T, 1); + ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP); + } + + /* Make the timings fit */ + if (t.recover > 16) { + t.active += t.recover - 16; + t.recover = 16; + } + if (t.active > 16) + t.active = 16; + + /* Now convert the clocks into values we can actually stuff into + the chip */ + + if (t.recover > 1) + t.recover--; /* 640B only */ + else + t.recover = 15; + + if (t.setup > 4) + t.setup = 0xC0; + else + t.setup = setup_data[t.setup]; + + if (ap->port_no == 0) { + t.active &= 0x0F; /* 0 = 16 */ + + /* Load setup timing */ + pci_read_config_byte(pdev, arttim, ®); + reg &= 0x3F; + reg |= t.setup; + pci_write_config_byte(pdev, arttim, reg); + + /* Load active/recovery */ + pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover); + } else { + /* Save the shared timings for channel, they will be loaded + by qc_issue. Reloading the setup time is expensive so we + keep a merged one loaded */ + pci_read_config_byte(pdev, ARTIM23, ®); + reg &= 0x3F; + reg |= t.setup; + pci_write_config_byte(pdev, ARTIM23, reg); + timing->reg58[adev->devno] = (t.active << 4) | t.recover; + } +} + + +/** + * cmd640_qc_issue - command preparation hook + * @qc: Command to be issued + * + * Channel 1 has shared timings. We must reprogram the + * clock each drive 2/3 switch we do. + */ + +static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_device *adev = qc->dev; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct cmd640_reg *timing = ap->private_data; + + if (ap->port_no != 0 && adev->devno != timing->last) { + pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]); + timing->last = adev->devno; + } + return ata_sff_qc_issue(qc); +} + +/** + * cmd640_port_start - port setup + * @ap: ATA port being set up + * + * The CMD640 needs to maintain private data structures so we + * allocate space here. + */ + +static int cmd640_port_start(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct cmd640_reg *timing; + + timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL); + if (timing == NULL) + return -ENOMEM; + timing->last = -1; /* Force a load */ + ap->private_data = timing; + return 0; +} + +static bool cmd640_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int irq_reg = ap->port_no ? ARTIM23 : CFR; + u8 irq_stat, irq_mask = ap->port_no ? 
0x10 : 0x04; + + pci_read_config_byte(pdev, irq_reg, &irq_stat); + + return irq_stat & irq_mask; +} + +static struct scsi_host_template cmd640_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations cmd640_port_ops = { + .inherits = &ata_sff_port_ops, + /* In theory xfer_noirq is not needed once we kill the prefetcher */ + .sff_data_xfer = ata_sff_data_xfer_noirq, + .sff_irq_check = cmd640_sff_irq_check, + .qc_issue = cmd640_qc_issue, + .cable_detect = ata_cable_40wire, + .set_piomode = cmd640_set_piomode, + .port_start = cmd640_port_start, +}; + +static void cmd640_hardware_init(struct pci_dev *pdev) +{ + u8 ctrl; + + /* CMD640 detected, commiserations */ + pci_write_config_byte(pdev, 0x5B, 0x00); + /* PIO0 command cycles */ + pci_write_config_byte(pdev, CMDTIM, 0); + /* 512 byte bursts (sector) */ + pci_write_config_byte(pdev, BRST, 0x40); + /* + * A reporter a long time ago + * Had problems with the data fifo + * So don't run the risk + * Of putting crap on the disk + * For its better just to go slow + */ + /* Do channel 0 */ + pci_read_config_byte(pdev, CNTRL, &ctrl); + pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0); + /* Ditto for channel 1 */ + pci_read_config_byte(pdev, ARTIM23, &ctrl); + ctrl |= 0x0C; + pci_write_config_byte(pdev, ARTIM23, ctrl); +} + +static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .port_ops = &cmd640_port_ops + }; + const struct ata_port_info *ppi[] = { &info, NULL }; + int rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + cmd640_hardware_init(pdev); + + return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int cmd640_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + cmd640_hardware_init(pdev); + ata_host_resume(host); + return 0; +} +#endif + +static const struct pci_device_id cmd640[] = { + { PCI_VDEVICE(CMD, 0x640), 0 }, + { }, +}; + +static struct pci_driver cmd640_pci_driver = { + .name = DRV_NAME, + .id_table = cmd640, + .probe = cmd640_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = cmd640_reinit_one, +#endif +}; + +module_pci_driver(cmd640_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, cmd640); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index e92b0ef43ec..13ca5883285 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -1,7 +1,9 @@ /* - * pata_cmd64x.c - ATI PATA for new ATA layer + * pata_cmd64x.c - CMD64x PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> + * (C) 2009-2010 Bartlomiej Zolnierkiewicz + * (C) 2012 MontaVista Software, LLC <source@mvista.com> * * Based upon * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002 @@ -24,14 +26,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_cmd64x" -#define DRV_VERSION "0.2.1" +#define DRV_VERSION "0.2.18" /* * CMD64x specific registers definition. 
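Note: CMD640 and CMD64x expose per-channel interrupt status at the same PCI config locations, 0x04 in CFR for channel 0 and 0x10 in ARTTIM23 for channel 1 (the hunk below corrects CFR_INTR_CH0 from 0x02 to 0x04 accordingly). cmd640_sff_irq_check() above therefore boils down to one config-space read:

	pci_read_config_byte(pdev, ap->port_no ? ARTIM23 : CFR, &irq_stat);
	return irq_stat & (ap->port_no ? 0x10 : 0x04);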
@@ -39,11 +40,10 @@ enum { CFR = 0x50, - CFR_INTR_CH0 = 0x02, - CNTRL = 0x51, - CNTRL_DIS_RA0 = 0x40, - CNTRL_DIS_RA1 = 0x80, - CNTRL_ENA_2ND = 0x08, + CFR_INTR_CH0 = 0x04, + CNTRL = 0x51, + CNTRL_CH0 = 0x04, + CNTRL_CH1 = 0x08, CMDTIM = 0x52, ARTTIM0 = 0x53, DRWTIM0 = 0x54, @@ -53,9 +53,6 @@ enum { ARTTIM23_DIS_RA2 = 0x04, ARTTIM23_DIS_RA3 = 0x08, ARTTIM23_INTR_CH1 = 0x10, - ARTTIM2 = 0x57, - ARTTIM3 = 0x57, - DRWTIM23 = 0x58, DRWTIM2 = 0x58, BRST = 0x59, DRWTIM3 = 0x5b, @@ -63,25 +60,16 @@ enum { MRDMODE = 0x71, MRDMODE_INTR_CH0 = 0x04, MRDMODE_INTR_CH1 = 0x08, - MRDMODE_BLK_CH0 = 0x10, - MRDMODE_BLK_CH1 = 0x20, BMIDESR0 = 0x72, UDIDETCR0 = 0x73, DTPR0 = 0x74, BMIDECR1 = 0x78, BMIDECSR = 0x79, - BMIDESR1 = 0x7A, UDIDETCR1 = 0x7B, DTPR1 = 0x7C }; -static int cmd64x_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -static int cmd648_pre_reset(struct ata_port *ap) +static int cmd648_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 r; @@ -89,32 +77,20 @@ static int cmd648_pre_reset(struct ata_port *ap) /* Check cable detect bits */ pci_read_config_byte(pdev, BMIDECSR, &r); if (r & (1 << ap->port_no)) - ap->cbl = ATA_CBL_PATA80; - else - ap->cbl = ATA_CBL_PATA40; - - return ata_std_prereset(ap); -} - -static void cmd64x_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, cmd64x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - -static void cmd648_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, cmd648_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ATA_CBL_PATA80; + return ATA_CBL_PATA40; } /** - * cmd64x_set_piomode - set initial PIO mode data + * cmd64x_set_timing - set PIO and MWDMA timing * @ap: ATA interface * @adev: ATA device + * @mode: mode * - * Called to do the PIO mode setup. + * Called to do the PIO and MWDMA mode setup. */ -static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) +static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_timing t; @@ -136,8 +112,9 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) int arttim = arttim_port[ap->port_no][adev->devno]; int drwtim = drwtim_port[ap->port_no][adev->devno]; - - if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { + /* ata_timing_compute is smart and will produce timings for MWDMA + that don't violate the drive's PIO capabilities. */ + if (ata_timing_compute(adev, mode, &t, T, 0) < 0) { printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); return; } @@ -164,7 +141,9 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) /* Now convert the clocks into values we can actually stuff into the chip */ - if (t.recover > 1) + if (t.recover == 16) + t.recover = 0; + else if (t.recover > 1) t.recover--; else t.recover = 15; @@ -187,6 +166,20 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) } /** + * cmd64x_set_piomode - set initial PIO mode data + * @ap: ATA interface + * @adev: ATA device + * + * Used when configuring the devices to set the PIO timings.
All the + * actual work is done by the PIO/MWDMA setting helper + */ + +static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + cmd64x_set_timing(ap, adev, adev->pio_mode); +} + +/** * cmd64x_set_dmamode - set initial DMA mode data * @ap: ATA interface * @adev: ATA device @@ -197,10 +190,7 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) { static const u8 udma_data[] = { - 0x31, 0x21, 0x11, 0x25, 0x15, 0x05 - }; - static const u8 mwdma_data[] = { - 0x30, 0x20, 0x10 + 0x30, 0x20, 0x10, 0x20, 0x10, 0x00 }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); @@ -213,13 +203,24 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) pci_read_config_byte(pdev, pciD, ®D); pci_read_config_byte(pdev, pciU, ®U); - regD &= ~(0x20 << shift); - regU &= ~(0x35 << shift); + /* DMA bits off */ + regD &= ~(0x20 << adev->devno); + /* DMA control bits */ + regU &= ~(0x30 << shift); + /* DMA timing bits */ + regU &= ~(0x05 << adev->devno); - if (adev->dma_mode >= XFER_UDMA_0) + if (adev->dma_mode >= XFER_UDMA_0) { + /* Merge the timing value */ regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift; - else - regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift; + /* Merge the control bits */ + regU |= 1 << adev->devno; /* UDMA on */ + if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */ + regU |= 4 << adev->devno; + } else { + regU &= ~ (1 << adev->devno); /* UDMA off */ + cmd64x_set_timing(ap, adev, adev->dma_mode); + } regD |= 0x20 << adev->devno; @@ -228,28 +229,85 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) } /** - * cmd648_dma_stop - DMA stop callback - * @qc: Command in progress + * cmd64x_sff_irq_check - check IDE interrupt + * @ap: ATA interface * - * DMA has completed. + * Check IDE interrupt in CFR/ARTTIM23 registers. */ -static void cmd648_bmdma_stop(struct ata_queued_cmd *qc) +static bool cmd64x_sff_irq_check(struct ata_port *ap) { - struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 dma_intr; - int dma_reg = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0; - int dma_mask = ap->port_no ? ARTTIM2 : CFR; + int irq_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0; + int irq_reg = ap->port_no ? ARTTIM23 : CFR; + u8 irq_stat; - ata_bmdma_stop(qc); + /* NOTE: reading the register should clear the interrupt */ + pci_read_config_byte(pdev, irq_reg, &irq_stat); - pci_read_config_byte(pdev, dma_reg, &dma_intr); - pci_write_config_byte(pdev, dma_reg, dma_intr | dma_mask); + return irq_stat & irq_mask; } /** - * cmd646r1_dma_stop - DMA stop callback + * cmd64x_sff_irq_clear - clear IDE interrupt + * @ap: ATA interface + * + * Clear IDE interrupt in CFR/ARTTIM23 and DMA status registers. + */ + +static void cmd64x_sff_irq_clear(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int irq_reg = ap->port_no ? ARTTIM23 : CFR; + u8 irq_stat; + + ata_bmdma_irq_clear(ap); + + /* Reading the register should be enough to clear the interrupt */ + pci_read_config_byte(pdev, irq_reg, &irq_stat); +} + +/** + * cmd648_sff_irq_check - check IDE interrupt + * @ap: ATA interface + * + * Check IDE interrupt in MRDMODE register. + */ + +static bool cmd648_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long base = pci_resource_start(pdev, 4); + int irq_mask = ap->port_no ? 
MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; + u8 mrdmode = inb(base + 1); + + return mrdmode & irq_mask; +} + +/** + * cmd648_sff_irq_clear - clear IDE interrupt + * @ap: ATA interface + * + * Clear IDE interrupt in MRDMODE and DMA status registers. + */ + +static void cmd648_sff_irq_clear(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long base = pci_resource_start(pdev, 4); + int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; + u8 mrdmode; + + ata_bmdma_irq_clear(ap); + + /* Clear this port's interrupt bit (leaving the other port alone) */ + mrdmode = inb(base + 1); + mrdmode &= ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1); + outb(mrdmode | irq_mask, base + 1); +} + +/** + * cmd646r1_bmdma_stop - DMA stop callback * @qc: Command in progress * * Stub for now while investigating the r1 quirk in the old driver. @@ -261,245 +319,215 @@ static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc) } static struct scsi_host_template cmd64x_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cmd64x_port_ops = { - .port_disable = ata_port_disable, +static const struct ata_port_operations cmd64x_base_ops = { + .inherits = &ata_bmdma_port_ops, .set_piomode = cmd64x_set_piomode, .set_dmamode = cmd64x_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cmd64x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +}; + +static struct ata_port_operations cmd64x_port_ops = { + .inherits = &cmd64x_base_ops, + .sff_irq_check = cmd64x_sff_irq_check, + .sff_irq_clear = cmd64x_sff_irq_clear, + .cable_detect = ata_cable_40wire, }; static struct ata_port_operations cmd646r1_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = cmd64x_set_piomode, - .set_dmamode = cmd64x_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cmd64x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, + .inherits = &cmd64x_base_ops, + .sff_irq_check = cmd64x_sff_irq_check, + .sff_irq_clear = cmd64x_sff_irq_clear, .bmdma_stop = cmd646r1_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = 
ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + .cable_detect = ata_cable_40wire, +}; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct ata_port_operations cmd646r3_port_ops = { + .inherits = &cmd64x_base_ops, + .sff_irq_check = cmd648_sff_irq_check, + .sff_irq_clear = cmd648_sff_irq_clear, + .cable_detect = ata_cable_40wire, }; static struct ata_port_operations cmd648_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = cmd64x_set_piomode, - .set_dmamode = cmd64x_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cmd648_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = cmd648_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &cmd64x_base_ops, + .sff_irq_check = cmd648_sff_irq_check, + .sff_irq_clear = cmd648_sff_irq_clear, + .cable_detect = cmd648_cable_detect, }; -static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +static void cmd64x_fixup(struct pci_dev *pdev) { - u32 class_rev; + u8 mrdmode; - static struct ata_port_info cmd_info[6] = { + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); + pci_read_config_byte(pdev, MRDMODE, &mrdmode); + mrdmode &= ~0x30; /* IRQ set up */ + mrdmode |= 0x02; /* Memory read line enable */ + pci_write_config_byte(pdev, MRDMODE, mrdmode); + + /* PPC specific fixup copied from old driver */ +#ifdef CONFIG_PPC + pci_write_config_byte(pdev, UDIDETCR0, 0xF0); +#endif +} + +static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + static const struct ata_port_info cmd_info[7] = { { /* CMD 643 - no UDMA */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &cmd64x_port_ops }, { /* CMD 646 with broken UDMA */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &cmd64x_port_ops }, - { /* CMD 646 with working UDMA */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = ATA_UDMA1, - .port_ops = &cmd64x_port_ops + { /* CMD 646U with broken UDMA */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .port_ops = &cmd646r3_port_ops + }, + { /* CMD 646U2 with working UDMA */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, + .port_ops = &cmd646r3_port_ops }, { /* CMD 646 rev 1 */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = 
&cmd646r1_port_ops }, { /* CMD 648 */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = ATA_UDMA2, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &cmd648_port_ops }, { /* CMD 649 */ - .sht = &cmd64x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = ATA_UDMA3, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &cmd648_port_ops } }; - static struct ata_port_info *port_info[2], *info; - u8 mrdmode; + const struct ata_port_info *ppi[] = { + &cmd_info[id->driver_data], + &cmd_info[id->driver_data], + NULL + }; + u8 reg; + int rc; + struct pci_dev *bridge = pdev->bus->self; + /* mobility split bridges don't report enabled ports correctly */ + int port_ok = !(bridge && bridge->vendor == + PCI_VENDOR_ID_MOBILITY_ELECTRONICS); + /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */ + int cntrl_ch0_ok = (id->driver_data != 0); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; - info = &cmd_info[id->driver_data]; + if (id->driver_data == 0) /* 643 */ + ata_pci_bmdma_clear_simplex(pdev); + + if (pdev->device == PCI_DEVICE_ID_CMD_646) + switch (pdev->revision) { + /* UDMA works since rev 5 */ + default: + ppi[0] = &cmd_info[3]; + ppi[1] = &cmd_info[3]; + break; + /* Interrupts in MRDMODE since rev 3 */ + case 3: + case 4: + ppi[0] = &cmd_info[2]; + ppi[1] = &cmd_info[2]; + break; + /* Rev 1 with other problems? */ + case 1: + ppi[0] = &cmd_info[4]; + ppi[1] = &cmd_info[4]; + /* FALL THRU */ + /* Early revs have no CNTRL_CH0 */ + case 2: + case 0: + cntrl_ch0_ok = 0; + break; + } - pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xFF; + cmd64x_fixup(pdev); + + /* check for enabled ports */ + pci_read_config_byte(pdev, CNTRL, ®); + if (!port_ok) + dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n"); + if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) { + dev_notice(&pdev->dev, "Primary port is disabled\n"); + ppi[0] = &ata_dummy_port_info; - if (id->driver_data == 0) /* 643 */ - ata_pci_clear_simplex(pdev); - - if (pdev->device == PCI_DEVICE_ID_CMD_646) { - /* Does UDMA work ? */ - if (class_rev > 4) - info = &cmd_info[2]; - /* Early rev with other problems ? */ - else if (class_rev == 1) - info = &cmd_info[3]; + } + if (port_ok && !(reg & CNTRL_CH1)) { + dev_notice(&pdev->dev, "Secondary port is disabled\n"); + ppi[1] = &ata_dummy_port_info; } - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); - pci_read_config_byte(pdev, MRDMODE, &mrdmode); - mrdmode &= ~ 0x30; /* IRQ set up */ - mrdmode |= 0x02; /* Memory read line enable */ - pci_write_config_byte(pdev, MRDMODE, mrdmode); + return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); +} - /* Force PIO 0 here.. 
*/ +#ifdef CONFIG_PM_SLEEP +static int cmd64x_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; - /* PPC specific fixup copied from old driver */ -#ifdef CONFIG_PPC - pci_write_config_byte(pdev, UDIDETCR0, 0xF0); -#endif + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + cmd64x_fixup(pdev); - port_info[0] = port_info[1] = info; - return ata_pci_init_one(pdev, port_info, 2); + ata_host_resume(host); + return 0; } +#endif + +static const struct pci_device_id cmd64x[] = { + { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 }, + { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 }, + { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 5 }, + { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 6 }, -static struct pci_device_id cmd64x[] = { - { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, - { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, - { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, - { 0, }, + { }, }; static struct pci_driver cmd64x_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = cmd64x, .probe = cmd64x_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = cmd64x_reinit_one, +#endif }; -static int __init cmd64x_init(void) -{ - return pci_register_driver(&cmd64x_pci_driver); -} - - -static void __exit cmd64x_exit(void) -{ - pci_unregister_driver(&cmd64x_pci_driver); -} - +module_pci_driver(cmd64x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cmd64x); MODULE_VERSION(DRV_VERSION); - -module_init(cmd64x_init); -module_exit(cmd64x_exit); diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index a6c6cebd0da..d65cb9d2fa8 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -29,19 +29,18 @@ * General Public License for more details. * * Documentation: - * Not publically available. + * Not publicly available. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_cs5520" -#define DRV_VERSION "0.6.2" +#define DRV_VERSION "0.6.6" struct pio_clocks { @@ -90,48 +89,12 @@ static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int } /** - * cs5520_enable_dma - turn on DMA bits - * - * Turn on the DMA bits for this disk. Needed because the BIOS probably - * has not done the work for us. Belongs in the core SATA code. - */ - -static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev) -{ - /* Set the DMA enable/disable flag */ - u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02); - reg |= 1<<(adev->devno + 5); - outb(reg, ap->ioaddr.bmdma_addr + 0x02); -} - -/** - * cs5520_set_dmamode - program DMA timings - * @ap: ATA port - * @adev: ATA device - * - * Program the DMA mode timings for the controller according to the pio - * clocking table. Note that this device sets the DMA timings to PIO - * mode values. This may seem bizarre but the 5520 architecture talks - * PIO mode to the disk and DMA mode to the controller so the underlying - * transfers are PIO timed. 
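Note: cmd64x_reinit_one() above is the usual shape of a libata PCI resume hook; whatever chip setup the boot firmware originally performed has to be redone before the host is thawed:

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;
	cmd64x_fixup(pdev);	/* reapply MRDMODE/latency setup lost across suspend */
	ata_host_resume(host);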
- */ - -static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev) -{ - static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 }; - cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode]); - cs5520_enable_dma(ap, adev); -} - -/** * cs5520_set_piomode - program PIO timings * @ap: ATA port * @adev: ATA device * * Program the PIO mode timings for the controller according to the pio - * clocking table. We know pio_mode will equal dma_mode because of the - * CS5520 architecture. At least once we turned DMA on and wrote a - * mode setter. + * clocking table. */ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev) @@ -139,196 +102,209 @@ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev) cs5520_set_timings(ap, adev, adev->pio_mode); } - -static int cs5520_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -static void cs5520_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, cs5520_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - static struct scsi_host_template cs5520_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), + .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations cs5520_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .qc_prep = ata_bmdma_dumb_qc_prep, + .cable_detect = ata_cable_40wire, .set_piomode = cs5520_set_piomode, - .set_dmamode = cs5520_set_dmamode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cs5520_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, }; -static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + static const unsigned int cmd_port[] = { 0x1F0, 0x170 }; + static const unsigned int ctl_port[] = { 0x3F6, 0x376 }; + struct ata_port_info pi = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .port_ops = &cs5520_port_ops, + }; + const struct ata_port_info *ppi[2]; u8 pcicfg; - static struct ata_probe_ent probe[2]; - int ports = 0; + void __iomem *iomap[5]; + struct ata_host *host; + struct ata_ioports *ioaddr; + int i, rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; /* IDE port enable bits */ - pci_read_config_byte(dev, 0x60, &pcicfg); + pci_read_config_byte(pdev, 0x60, &pcicfg); /* Check if the ATA ports are enabled */ if ((pcicfg & 3) == 0) return 
-ENODEV; + ppi[0] = ppi[1] = &ata_dummy_port_info; + if (pcicfg & 1) + ppi[0] = π + if (pcicfg & 2) + ppi[1] = π + if ((pcicfg & 0x40) == 0) { - printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n"); - pci_write_config_byte(dev, 0x60, pcicfg | 0x40); + dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n"); + pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); } + pi.mwdma_mask = id->driver_data; + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) + return -ENOMEM; + /* Perform set up for DMA */ - if (pci_enable_device_bars(dev, 1<<2)) { + if (pci_enable_device_io(pdev)) { printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); return -ENODEV; } - pci_set_master(dev); - if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) { + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); return -ENODEV; } - if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); return -ENODEV; } - /* We have to do our own plumbing as the PCI setup for this - chipset is non-standard so we can't punt to the libata code */ - - INIT_LIST_HEAD(&probe[0].node); - probe[0].dev = pci_dev_to_dev(dev); - probe[0].port_ops = &cs5520_port_ops; - probe[0].sht = &cs5520_sht; - probe[0].pio_mask = 0x1F; - probe[0].mwdma_mask = id->driver_data; - probe[0].irq = 14; - probe[0].irq_flags = 0; - probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST; - probe[0].n_ports = 1; - probe[0].port[0].cmd_addr = 0x1F0; - probe[0].port[0].ctl_addr = 0x3F6; - probe[0].port[0].altstatus_addr = 0x3F6; - probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2); - - /* The secondary lurks at different addresses but is otherwise - the same beastie */ - - probe[1] = probe[0]; - INIT_LIST_HEAD(&probe[1].node); - probe[1].irq = 15; - probe[1].port[0].cmd_addr = 0x170; - probe[1].port[0].ctl_addr = 0x376; - probe[1].port[0].altstatus_addr = 0x376; - probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8; - - /* Let libata fill in the port details */ - ata_std_ports(&probe[0].port[0]); - ata_std_ports(&probe[1].port[0]); - - /* Now add the ports that are active */ - if (pcicfg & 1) - ports += ata_device_add(&probe[0]); - if (pcicfg & 2) - ports += ata_device_add(&probe[1]); - if (ports) - return 0; - return -ENODEV; + /* Map IO ports and initialize host accordingly */ + iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8); + iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1); + iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8); + iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1); + iomap[4] = pcim_iomap(pdev, 2, 0); + + if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) + return -ENOMEM; + + ioaddr = &host->ports[0]->ioaddr; + ioaddr->cmd_addr = iomap[0]; + ioaddr->ctl_addr = iomap[1]; + ioaddr->altstatus_addr = iomap[1]; + ioaddr->bmdma_addr = iomap[4]; + ata_sff_std_ports(ioaddr); + + ata_port_desc(host->ports[0], + "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); + ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); + + ioaddr = &host->ports[1]->ioaddr; + ioaddr->cmd_addr = iomap[2]; + ioaddr->ctl_addr = iomap[3]; + ioaddr->altstatus_addr = iomap[3]; + ioaddr->bmdma_addr = iomap[4] + 8; + ata_sff_std_ports(ioaddr); + + ata_port_desc(host->ports[1], + "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); + ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); + + /* activate the host */ + pci_set_master(pdev); + rc = 
ata_host_start(host); + if (rc) + return rc; + + for (i = 0; i < 2; i++) { + static const int irq[] = { 14, 15 }; + struct ata_port *ap = host->ports[i]; + + if (ata_port_is_dummy(ap)) + continue; + + rc = devm_request_irq(&pdev->dev, irq[ap->port_no], + ata_bmdma_interrupt, 0, DRV_NAME, host); + if (rc) + return rc; + + ata_port_desc(ap, "irq %d", irq[i]); + } + + return ata_host_register(host, &cs5520_sht); +} + +#ifdef CONFIG_PM_SLEEP +/** + * cs5520_reinit_one - device resume + * @pdev: PCI device + * + * Do any reconfiguration work needed by a resume from RAM. We need + * to restore DMA mode support on BIOSen which disabled it + */ + +static int cs5520_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + u8 pcicfg; + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + pci_read_config_byte(pdev, 0x60, &pcicfg); + if ((pcicfg & 0x40) == 0) + pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); + + ata_host_resume(host); + return 0; } /** - * cs5520_remove_one - device unload - * @pdev: PCI device being removed + * cs5520_pci_device_suspend - device suspend + * @pdev: PCI device * - * Handle an unplug/unload event for a PCI device. Unload the - * PCI driver but do not use the default handler as we manage - * resources ourself and *MUST NOT* disable the device as it has - * other functions. + * We have to cut and waste bits from the standard method because + * the 5520 is a bit odd and not just a pure ATA device. As a result + * we must not disable it. The needed code is short and this avoids + * chip specific mess in the core code. */ -static void __devexit cs5520_remove_one(struct pci_dev *pdev) +static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { - struct device *dev = pci_dev_to_dev(pdev); - struct ata_host *host = dev_get_drvdata(dev); + struct ata_host *host = pci_get_drvdata(pdev); + int rc = 0; - ata_host_remove(host); - dev_set_drvdata(dev, NULL); + rc = ata_host_suspend(host, mesg); + if (rc) + return rc; + + pci_save_state(pdev); + return 0; } +#endif /* CONFIG_PM_SLEEP */ /* For now keep DMA off. 
We can set it for all but A rev CS5510 once the core ATA code can handle it */ -static struct pci_device_id pata_cs5520[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510), }, - { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520), }, - { 0, }, +static const struct pci_device_id pata_cs5520[] = { + { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), }, + { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), }, + + { }, }; static struct pci_driver cs5520_pci_driver = { .name = DRV_NAME, .id_table = pata_cs5520, .probe = cs5520_init_one, - .remove = cs5520_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = cs5520_pci_device_suspend, + .resume = cs5520_reinit_one, +#endif }; - -static int __init cs5520_init(void) -{ - return pci_register_driver(&cs5520_pci_driver); -} - -static void __exit cs5520_exit(void) -{ - pci_unregister_driver(&cs5520_pci_driver); -} +module_pci_driver(cs5520_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pata_cs5520); MODULE_VERSION(DRV_VERSION); - -module_init(cs5520_init); -module_exit(cs5520_exit); - diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index 7bba4d954e9..48ae4b43447 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -1,7 +1,6 @@ /* * pata-cs5530.c - CS5530 PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> * * based upon cs5530.c by Mark Lord. * @@ -27,7 +26,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> @@ -35,7 +33,14 @@ #include <linux/dmi.h> #define DRV_NAME "pata_cs5530" -#define DRV_VERSION "0.6" +#define DRV_VERSION "0.7.4" + +static void __iomem *cs5530_port_base(struct ata_port *ap) +{ + unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr; + + return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no); +} /** * cs5530_set_piomode - PIO setup @@ -52,19 +57,19 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev) {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010} }; - unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no; + void __iomem *base = cs5530_port_base(ap); u32 tuning; int format; /* Find out which table to use */ - tuning = inl(base + 0x04); + tuning = ioread32(base + 0x04); format = (tuning & 0x80000000UL) ? 
1 : 0; /* Now load the right timing register */ if (adev->devno) base += 0x08; - outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base); + iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base); } /** @@ -79,12 +84,12 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev) static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no; + void __iomem *base = cs5530_port_base(ap); u32 tuning, timing = 0; u8 reg; /* Find out which table to use */ - tuning = inl(base + 0x04); + tuning = ioread32(base + 0x04); switch(adev->dma_mode) { case XFER_UDMA_0: @@ -105,20 +110,20 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) /* Merge in the PIO format bit */ timing |= (tuning & 0x80000000UL); if (adev->devno == 0) /* Master */ - outl(timing, base + 0x04); + iowrite32(timing, base + 0x04); else { if (timing & 0x00100000) tuning |= 0x00100000; /* UDMA for both */ else tuning &= ~0x00100000; /* MWDMA for both */ - outl(tuning, base + 0x04); - outl(timing, base + 0x0C); + iowrite32(tuning, base + 0x04); + iowrite32(timing, base + 0x0C); } /* Set the DMA capable bit in the BMDMA area */ - reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); reg |= (1 << (5 + adev->devno)); - outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); /* Remember the last DMA setup we did */ @@ -126,99 +131,50 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) } /** - * cs5530_qc_issue_prot - command issue + * cs5530_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if - * neccessary. Specifically we have a problem that there is only + * necessary. Specifically we have a problem that there is only * one MWDMA/UDMA bit. */ -static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct ata_device *prev = ap->private_data; /* See if the DMA settings could be wrong */ - if (adev->dma_mode != 0 && adev != prev && prev != NULL) { + if (ata_dma_enabled(adev) && adev != prev && prev != NULL) { /* Maybe, but do the channels match MWDMA/UDMA ? 
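Note: the CS5530 has a single MWDMA/UDMA select bit per channel, so this qc_issue wrapper must reload timings whenever back-to-back commands land on devices of different DMA classes. The check that follows reduces to an inequality on the UDMA flags:

	if (ata_dma_enabled(adev) && prev && adev != prev &&
	    ata_using_udma(adev) != ata_using_udma(prev))
		cs5530_set_dmamode(ap, adev);	/* flip the shared mode bit */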
*/ - if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) || - (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0)) + if ((ata_using_udma(adev) && !ata_using_udma(prev)) || + (ata_using_udma(prev) && !ata_using_udma(adev))) /* Switch the mode bits */ cs5530_set_dmamode(ap, adev); } - return ata_qc_issue_prot(qc); -} - -static int cs5530_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -static void cs5530_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, cs5530_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_bmdma_qc_issue(qc); } - static struct scsi_host_template cs5530_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), + .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations cs5530_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = cs5530_set_piomode, - .set_dmamode = cs5530_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, + .inherits = &ata_bmdma_port_ops, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cs5530_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, + .qc_prep = ata_bmdma_dumb_qc_prep, + .qc_issue = cs5530_qc_issue, - .qc_prep = ata_qc_prep, - .qc_issue = cs5530_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .cable_detect = ata_cable_40wire, + .set_piomode = cs5530_set_piomode, + .set_dmamode = cs5530_set_dmamode, }; -static struct dmi_system_id palmax_dmi_table[] = { +static const struct dmi_system_id palmax_dmi_table[] = { { .ident = "Palmax PD1100", .matches = { @@ -238,38 +194,18 @@ static int cs5530_is_palmax(void) return 0; } + /** - * cs5530_init_one - Initialise a CS5530 - * @dev: PCI device - * @id: Entry in match table + * cs5530_init_chip - Chipset init * - * Install a driver for the newly found CS5530 companion chip. Most of - * this is just housekeeping. We have to set the chip up correctly and - * turn off various bits of emulation magic. 
+ * Perform the chip initialisation work that is shared between both + * setup and resume paths */ -static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static int cs5530_init_chip(void) { - int compiler_warning_pointless_fix; - struct pci_dev *master_0 = NULL, *cs5530_0 = NULL; - static struct ata_port_info info = { - .sht = &cs5530_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, - .port_ops = &cs5530_port_ops - }; - /* The docking connector doesn't do UDMA, and it seems not MWDMA */ - static struct ata_port_info info_palmax_secondary = { - .sht = &cs5530_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .port_ops = &cs5530_port_ops - }; - static struct ata_port_info *port_info[2] = { &info, &info }; + struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL; - dev = NULL; while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) { switch (dev->device) { case PCI_DEVICE_ID_CYRIX_PCI_MASTER: @@ -290,7 +226,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) } pci_set_master(cs5530_0); - compiler_warning_pointless_fix = pci_set_mwi(cs5530_0); + pci_try_set_mwi(cs5530_0); /* * Set PCI CacheLineSize to 16-bytes: @@ -338,13 +274,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_dev_put(master_0); pci_dev_put(cs5530_0); - - if (cs5530_is_palmax()) - port_info[1] = &info_palmax_secondary; - - /* Now kick off ATA set up */ - return ata_pci_init_one(dev, port_info, 2); - + return 0; fail_put: if (master_0) pci_dev_put(master_0); @@ -353,35 +283,89 @@ fail_put: return -ENODEV; } -static struct pci_device_id cs5530[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, - { 0, }, -}; - -static struct pci_driver cs5530_pci_driver = { - .name = DRV_NAME, - .id_table = cs5530, - .probe = cs5530_init_one, - .remove = ata_pci_remove_one -}; +/** + * cs5530_init_one - Initialise a CS5530 + * @dev: PCI device + * @id: Entry in match table + * + * Install a driver for the newly found CS5530 companion chip. Most of + * this is just housekeeping. We have to set the chip up correctly and + * turn off various bits of emulation magic. 
+ */ -static int __init cs5530_init(void) +static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - return pci_register_driver(&cs5530_pci_driver); -} + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, + .port_ops = &cs5530_port_ops + }; + /* The docking connector doesn't do UDMA, and it seems not MWDMA */ + static const struct ata_port_info info_palmax_secondary = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .port_ops = &cs5530_port_ops + }; + const struct ata_port_info *ppi[] = { &info, NULL }; + int rc; + rc = pcim_enable_device(pdev); + if (rc) + return rc; -static void __exit cs5530_exit(void) + /* Chip initialisation */ + if (cs5530_init_chip()) + return -ENODEV; + + if (cs5530_is_palmax()) + ppi[1] = &info_palmax_secondary; + + /* Now kick off ATA set up */ + return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int cs5530_reinit_one(struct pci_dev *pdev) { - pci_unregister_driver(&cs5530_pci_driver); + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + /* If we fail on resume we are doomed */ + if (cs5530_init_chip()) + return -EIO; + + ata_host_resume(host); + return 0; } +#endif /* CONFIG_PM_SLEEP */ +static const struct pci_device_id cs5530[] = { + { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, + + { }, +}; + +static struct pci_driver cs5530_pci_driver = { + .name = DRV_NAME, + .id_table = cs5530, + .probe = cs5530_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = cs5530_reinit_one, +#endif +}; + +module_pci_driver(cs5530_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cs5530); MODULE_VERSION(DRV_VERSION); - -module_init(cs5530_init); -module_exit(cs5530_exit); diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index d64fcdceaf0..97584e8456d 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -1,10 +1,10 @@ /* * pata-cs5535.c - CS5535 PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> * * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and - * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de + * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de> * and Alexander Kiausch <alex.kiausch@t-online.de> * * This program is free software; you can redistribute it and/or modify @@ -25,21 +25,20 @@ * Documentation: * Available from AMD web site. 
* TODO - * Review errata to see if serializing is neccessary + * Review errata to see if serializing is necessary */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <asm/msr.h> -#define DRV_NAME "cs5535" -#define DRV_VERSION "0.2.10" +#define DRV_NAME "pata_cs5535" +#define DRV_VERSION "0.2.12" /* * The Geode (Aka Athlon GX now) uses an internal MSR based @@ -67,39 +66,24 @@ #define CS5535_CABLE_DETECT 0x48 -#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 ) - /** - * cs5535_pre_reset - detect cable type + * cs5535_cable_detect - detect cable type * @ap: Port to detect on * * Perform cable detection for ATA66 capable cable. Return a libata * cable type. */ -static int cs5535_pre_reset(struct ata_port *ap) +static int cs5535_cable_detect(struct ata_port *ap) { u8 cable; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable); if (cable & 1) - ap->cbl = ATA_CBL_PATA80; + return ATA_CBL_PATA80; else - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * cs5535_error_handler - reset/probe - * @ap: Port to reset - * - * Reset and configure a port - */ - -static void cs5535_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, cs5535_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ATA_CBL_PATA40; } /** @@ -113,7 +97,7 @@ static void cs5535_error_handler(struct ata_port *ap) static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const u16 pio_timings[5] = { - 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 + 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 }; static const u16 pio_cmd_timings[5] = { 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 @@ -170,56 +154,14 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev) } static struct scsi_host_template cs5535_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations cs5535_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .cable_detect = cs5535_cable_detect, .set_piomode = cs5535_set_piomode, .set_dmamode = cs5535_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cs5535_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /** @@ -234,58 +176,40 @@ static struct 
ata_port_operations cs5535_port_ops = { static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &cs5535_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &cs5535_port_ops }; - struct ata_port_info *ports[1] = { &info }; - - u32 timings, dummy; + const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; - /* Check the BIOS set the initial timing clock. If not set the - timings for PIO0 */ - rdmsr(ATAC_CH0D0_PIO, timings, dummy); - if (CS5535_BAD_PIO(timings)) - wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0); - rdmsr(ATAC_CH0D1_PIO, timings, dummy); - if (CS5535_BAD_PIO(timings)) - wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); - return ata_pci_init_one(dev, ports, 1); + return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0); } -static struct pci_device_id cs5535[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, 0x002D), }, - { 0, }, +static const struct pci_device_id cs5535[] = { + { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), }, + + { }, }; static struct pci_driver cs5535_pci_driver = { .name = DRV_NAME, .id_table = cs5535, .probe = cs5535_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init cs5535_init(void) -{ - return pci_register_driver(&cs5535_pci_driver); -} - - -static void __exit cs5535_exit(void) -{ - pci_unregister_driver(&cs5535_pci_driver); -} - +module_pci_driver(cs5535_pci_driver); MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch"); -MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530"); +MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cs5535); MODULE_VERSION(DRV_VERSION); - -module_init(cs5535_init); -module_exit(cs5535_exit); diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c new file mode 100644 index 00000000000..6c15a554efb --- /dev/null +++ b/drivers/ata/pata_cs5536.c @@ -0,0 +1,312 @@ +/* + * pata_cs5536.c - CS5536 PATA for new ATA layer + * (C) 2007 Martin K. Petersen <mkp@mkp.net> + * (C) 2011 Bartlomiej Zolnierkiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Documentation: + * Available from AMD web site. + * + * The IDE timing registers for the CS5536 live in the Geode Machine + * Specific Register file and not PCI config space. Most BIOSes + * virtualize the PCI registers so the chip looks like a standard IDE + * controller. Unfortunately not all implementations get this right. 
+ * In particular some have problems with unaligned accesses to the + * virtualized PCI registers. This driver always does full dword + * writes to work around the issue. Also, in case of a bad BIOS this + * driver can be loaded with the "msr=1" parameter which forces using + * the Machine Specific Registers to configure the device. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/libata.h> +#include <scsi/scsi_host.h> +#include <linux/dmi.h> + +#ifdef CONFIG_X86_32 +#include <asm/msr.h> +static int use_msr; +module_param_named(msr, use_msr, int, 0644); +MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)"); +#else +#undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */ +#undef wrmsr +#define rdmsr(x, y, z) do { } while (0) +#define wrmsr(x, y, z) do { } while (0) +#define use_msr 0 +#endif + +#define DRV_NAME "pata_cs5536" +#define DRV_VERSION "0.0.8" + +enum { + MSR_IDE_CFG = 0x51300010, + PCI_IDE_CFG = 0x40, + + CFG = 0, + DTC = 2, + CAST = 3, + ETC = 4, + + IDE_CFG_CHANEN = (1 << 1), + IDE_CFG_CABLE = (1 << 17) | (1 << 16), + + IDE_D0_SHIFT = 24, + IDE_D1_SHIFT = 16, + IDE_DRV_MASK = 0xff, + + IDE_CAST_D0_SHIFT = 6, + IDE_CAST_D1_SHIFT = 4, + IDE_CAST_DRV_MASK = 0x3, + IDE_CAST_CMD_MASK = 0xff, + IDE_CAST_CMD_SHIFT = 24, + + IDE_ETC_UDMA_MASK = 0xc0, +}; + +/* Some Bachmann OT200 devices have a non working UDMA support due a + * missing resistor. + */ +static const struct dmi_system_id udma_quirk_dmi_table[] = { + { + .ident = "Bachmann electronic OT200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"), + DMI_MATCH(DMI_PRODUCT_NAME, "OT200"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1") + }, + }, + { } +}; + +static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val) +{ + if (unlikely(use_msr)) { + u32 dummy __maybe_unused; + + rdmsr(MSR_IDE_CFG + reg, *val, dummy); + return 0; + } + + return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val); +} + +static int cs5536_write(struct pci_dev *pdev, int reg, int val) +{ + if (unlikely(use_msr)) { + wrmsr(MSR_IDE_CFG + reg, val, 0); + return 0; + } + + return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val); +} + +static void cs5536_program_dtc(struct ata_device *adev, u8 tim) +{ + struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev); + int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT; + u32 dtc; + + cs5536_read(pdev, DTC, &dtc); + dtc &= ~(IDE_DRV_MASK << dshift); + dtc |= tim << dshift; + cs5536_write(pdev, DTC, dtc); +} + +/** + * cs5536_cable_detect - detect cable type + * @ap: Port to detect on + * + * Perform cable detection for ATA66 capable cable. + * + * Returns a cable type. 
+ */ + +static int cs5536_cable_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u32 cfg; + + cs5536_read(pdev, CFG, &cfg); + + if (cfg & IDE_CFG_CABLE) + return ATA_CBL_PATA80; + else + return ATA_CBL_PATA40; +} + +/** + * cs5536_set_piomode - PIO setup + * @ap: ATA interface + * @adev: device on the interface + */ + +static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + static const u8 drv_timings[5] = { + 0x98, 0x55, 0x32, 0x21, 0x20, + }; + + static const u8 addr_timings[5] = { + 0x2, 0x1, 0x0, 0x0, 0x0, + }; + + static const u8 cmd_timings[5] = { + 0x99, 0x92, 0x90, 0x22, 0x20, + }; + + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct ata_device *pair = ata_dev_pair(adev); + int mode = adev->pio_mode - XFER_PIO_0; + int cmdmode = mode; + int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT; + u32 cast; + + if (pair) + cmdmode = min(mode, pair->pio_mode - XFER_PIO_0); + + cs5536_program_dtc(adev, drv_timings[mode]); + + cs5536_read(pdev, CAST, &cast); + + cast &= ~(IDE_CAST_DRV_MASK << cshift); + cast |= addr_timings[mode] << cshift; + + cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT); + cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT; + + cs5536_write(pdev, CAST, cast); +} + +/** + * cs5536_set_dmamode - DMA timing setup + * @ap: ATA interface + * @adev: Device being configured + * + */ + +static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + static const u8 udma_timings[6] = { + 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6, + }; + + static const u8 mwdma_timings[3] = { + 0x67, 0x21, 0x20, + }; + + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u32 etc; + int mode = adev->dma_mode; + int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT; + + cs5536_read(pdev, ETC, &etc); + + if (mode >= XFER_UDMA_0) { + etc &= ~(IDE_DRV_MASK << dshift); + etc |= udma_timings[mode - XFER_UDMA_0] << dshift; + } else { /* MWDMA */ + etc &= ~(IDE_ETC_UDMA_MASK << dshift); + cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]); + } + + cs5536_write(pdev, ETC, etc); +} + +static struct scsi_host_template cs5536_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations cs5536_port_ops = { + .inherits = &ata_bmdma32_port_ops, + .cable_detect = cs5536_cable_detect, + .set_piomode = cs5536_set_piomode, + .set_dmamode = cs5536_set_dmamode, +}; + +/** + * cs5536_init_one + * @dev: PCI device + * @id: Entry in match table + * + */ + +static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + .port_ops = &cs5536_port_ops, + }; + + static const struct ata_port_info no_udma_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .port_ops = &cs5536_port_ops, + }; + + + const struct ata_port_info *ppi[2]; + u32 cfg; + + if (dmi_check_system(udma_quirk_dmi_table)) + ppi[0] = &no_udma_info; + else + ppi[0] = &info; + + ppi[1] = &ata_dummy_port_info; + + if (use_msr) + printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n"); + + cs5536_read(dev, CFG, &cfg); + + if ((cfg & IDE_CFG_CHANEN) == 0) { + printk(KERN_ERR DRV_NAME ": disabled by BIOS\n"); + return -ENODEV; + } + + return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); +} + +static const struct pci_device_id cs5536[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), }, + { }, +}; + +static struct pci_driver 
cs5536_pci_driver = { + .name = DRV_NAME, + .id_table = cs5536, + .probe = cs5536_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(cs5536_pci_driver); + +MODULE_AUTHOR("Martin K. Petersen"); +MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, cs5536); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index dfa5ac53904..793018460d8 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -1,7 +1,7 @@ /* * pata_cypress.c - Cypress PATA for new ATA layer * (C) 2006 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox * * Based heavily on * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002 @@ -11,14 +11,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_cypress" -#define DRV_VERSION "0.1.2" +#define DRV_VERSION "0.1.5" /* here are the offset definitions for the registers */ @@ -41,17 +40,6 @@ enum { CY82_INDEX_TIMEOUT = 0x32 }; -static int cy82c693_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -static void cy82c693_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, cy82c693_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - /** * cy82c693_set_piomode - set initial PIO mode data * @ap: ATA interface @@ -73,14 +61,16 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev) return; } - time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4); - time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4); + time_16 = clamp_val(t.recover - 1, 0, 15) | + (clamp_val(t.active - 1, 0, 15) << 4); + time_8 = clamp_val(t.act8b - 1, 0, 15) | + (clamp_val(t.rec8b - 1, 0, 15) << 4); if (adev->devno == 0) { pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr); addr &= ~0x0F; /* Mask bits */ - addr |= FIT(t.setup, 0, 15); + addr |= clamp_val(t.setup - 1, 0, 15); pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr); pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16); @@ -90,7 +80,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev) pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr); addr &= ~0xF0; /* Mask bits */ - addr |= (FIT(t.setup, 0, 15) << 4); + addr |= (clamp_val(t.setup - 1, 0, 15) << 4); pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr); pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16); @@ -121,107 +111,56 @@ static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev) } static struct scsi_host_template cy82c693_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations cy82c693_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .cable_detect = 
ata_cable_40wire, .set_piomode = cy82c693_set_piomode, .set_dmamode = cy82c693_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = cy82c693_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &cy82c693_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &cy82c693_port_ops }; - static struct ata_port_info *port_info[1] = { &info }; + const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; - /* Devfn 1 is the ATA primary. The secondary is magic and on devfn2. For the - moment we don't handle the secondary. FIXME */ + /* Devfn 1 is the ATA primary. The secondary is magic and on devfn2. + For the moment we don't handle the secondary. FIXME */ if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; - return ata_pci_init_one(pdev, port_info, 1); + return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); } -static struct pci_device_id cy82c693[] = { - { PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { 0, }, +static const struct pci_device_id cy82c693[] = { + { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), }, + + { }, }; static struct pci_driver cy82c693_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = cy82c693, .probe = cy82c693_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init cy82c693_init(void) -{ - return pci_register_driver(&cy82c693_pci_driver); -} - - -static void __exit cy82c693_exit(void) -{ - pci_unregister_driver(&cy82c693_pci_driver); -} - +module_pci_driver(cy82c693_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cy82c693); MODULE_VERSION(DRV_VERSION); - -module_init(cy82c693_init); -module_exit(cy82c693_exit); diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 95cd1ca181f..4a57a6f032d 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -1,7 +1,8 @@ /* * pata_efar.c - EFAR PIIX clone controller driver * - * (C) 2005 Red Hat <alan@redhat.com> + * (C) 2005 Red Hat + * (C) 2009-2010 Bartlomiej Zolnierkiewicz * * Some parts based on ata_piix.c by Jeff Garzik and others. 
* @@ -13,7 +14,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -22,54 +22,57 @@ #include <linux/ata.h> #define DRV_NAME "pata_efar" -#define DRV_VERSION "0.4.2" +#define DRV_VERSION "0.4.5" /** - * efar_pre_reset - check for 40/80 pin - * @ap: Port + * efar_pre_reset - Enable bits + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Perform cable detection for the EFAR ATA interface. This is * different to the PIIX arrangement */ -static int efar_pre_reset(struct ata_port *ap) +static int efar_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits efar_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ }; - + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 tmp; if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) return -ENOENT; - pci_read_config_byte(pdev, 0x47, &tmp); - if (tmp & (2 >> ap->port_no)) - ap->cbl = ATA_CBL_PATA40; - else - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); + return ata_sff_prereset(link, deadline); } /** - * efar_probe_reset - Probe specified port on PATA host controller - * @ap: Port to probe + * efar_cable_detect - check for 40/80 pin + * @ap: Port * - * LOCKING: - * None (inherited from caller). + * Perform cable detection for the EFAR ATA interface. This is + * different to the PIIX arrangement */ -static void efar_error_handler(struct ata_port *ap) +static int efar_cable_detect(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 tmp; + + pci_read_config_byte(pdev, 0x47, &tmp); + if (tmp & (2 >> ap->port_no)) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; } +static DEFINE_SPINLOCK(efar_lock); + /** * efar_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring - * @adev: um + * @adev: Device to program * * Set PIO mode for device, in host controller PCI config space. * @@ -81,8 +84,10 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); - unsigned int idetm_port= ap->port_no ? 0x42 : 0x40; - u16 idetm_data; + unsigned int master_port = ap->port_no ? 
0x42 : 0x40; + unsigned long flags; + u16 master_data; + u8 udma_enable; int control = 0; /* @@ -97,39 +102,45 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) { 2, 1 }, { 2, 3 }, }; - if (pio > 2) - control |= 1; /* TIME1 enable */ + if (pio > 1) + control |= 1; /* TIME */ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ - control |= 2; /* IE enable */ - /* Intel specifies that the PPE functionality is for disk only */ + control |= 2; /* IE */ + /* Intel specifies that the prefetch/posting is for disk only */ if (adev->class == ATA_DEV_ATA) - control |= 4; /* PPE enable */ + control |= 4; /* PPE */ - pci_read_config_word(dev, idetm_port, &idetm_data); + spin_lock_irqsave(&efar_lock, flags); - /* Enable PPE, IE and TIME as appropriate */ + pci_read_config_word(dev, master_port, &master_data); + /* Set PPE, IE, and TIME as appropriate */ if (adev->devno == 0) { - idetm_data &= 0xCCF0; - idetm_data |= control; - idetm_data |= (timings[pio][0] << 12) | + master_data &= 0xCCF0; + master_data |= control; + master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } else { int shift = 4 * ap->port_no; u8 slave_data; - idetm_data &= 0xCC0F; - idetm_data |= (control << 4); + master_data &= 0xFF0F; + master_data |= (control << 4); - /* Slave timing in seperate register */ + /* Slave timing in separate register */ pci_read_config_byte(dev, 0x44, &slave_data); - slave_data &= 0x0F << shift; + slave_data &= ap->port_no ? 0x0F : 0xF0; slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift; pci_write_config_byte(dev, 0x44, slave_data); } - idetm_data |= 0x4000; /* Ensure SITRE is enabled */ - pci_write_config_word(dev, idetm_port, idetm_data); + master_data |= 0x4000; /* Ensure SITRE is set */ + pci_write_config_word(dev, master_port, master_data); + + pci_read_config_byte(dev, 0x48, &udma_enable); + udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); + pci_write_config_byte(dev, 0x48, udma_enable); + spin_unlock_irqrestore(&efar_lock, flags); } /** @@ -150,6 +161,7 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) u16 master_data; u8 speed = adev->dma_mode; int devid = adev->devno + 2 * ap->port_no; + unsigned long flags; u8 udma_enable; static const /* ISP RTC */ @@ -159,6 +171,8 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) { 2, 1 }, { 2, 3 }, }; + spin_lock_irqsave(&efar_lock, flags); + pci_read_config_word(dev, master_port, &master_data); pci_read_config_byte(dev, 0x48, &udma_enable); @@ -200,7 +214,7 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ master_data |= control << 4; pci_read_config_byte(dev, 0x44, &slave_data); - slave_data &= (0x0F + 0xE1 * ap->port_no); + slave_data &= ap->port_no ? 0x0F : 0xF0; /* Load the matching timing */ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 
4 : 0); pci_write_config_byte(dev, 0x44, slave_data); @@ -216,57 +230,19 @@ static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev) pci_write_config_word(dev, master_port, master_data); } pci_write_config_byte(dev, 0x48, udma_enable); + spin_unlock_irqrestore(&efar_lock, flags); } static struct scsi_host_template efar_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations efar_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations efar_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = efar_cable_detect, .set_piomode = efar_set_piomode, .set_dmamode = efar_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = efar_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .prereset = efar_pre_reset, }; @@ -286,26 +262,24 @@ static const struct ata_port_operations efar_ops = { static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - static struct ata_port_info info = { - .sht = &efar_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma1-2 */ - .udma_mask = 0x0f, /* UDMA 66 */ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, + .udma_mask = ATA_UDMA4, .port_ops = &efar_ops, }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *ppi[] = { &info, &info }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id efar_pci_tbl[] = { - { 0x1055, 0x9130, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VDEVICE(EFAR, 0x9130), }, + { } /* terminate list */ }; @@ -314,25 +288,16 @@ static struct pci_driver efar_pci_driver = { .id_table = efar_pci_tbl, .probe = efar_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init efar_init(void) -{ - return pci_register_driver(&efar_pci_driver); -} - -static void __exit efar_exit(void) -{ - pci_unregister_driver(&efar_pci_driver); -} - - -module_init(efar_init); 
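
This hunk repeats a conversion applied to every driver touched by the series: the open-coded module_init()/module_exit() pair that merely wraps pci_register_driver()/pci_unregister_driver() collapses into a single module_pci_driver() invocation. A minimal sketch of the before/after shape, using a hypothetical foo_pci_driver (nothing assumed beyond what <linux/pci.h> provides):

	/* Before: per-driver registration boilerplate. */
	static int __init foo_init(void)
	{
		return pci_register_driver(&foo_pci_driver);
	}

	static void __exit foo_exit(void)
	{
		pci_unregister_driver(&foo_pci_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);

	/* After: one macro expands to an equivalent init/exit pair. */
	module_pci_driver(foo_pci_driver);

The id-table hunks get the same compression: PCI_VDEVICE(EFAR, 0x9130) expands to the vendor/device pair with the subsystem and class fields defaulted to PCI_ANY_ID/0, replacing the long-hand initialisers.
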
-module_exit(efar_exit); +module_pci_driver(efar_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efar_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c new file mode 100644 index 00000000000..4d37c5415fc --- /dev/null +++ b/drivers/ata/pata_ep93xx.c @@ -0,0 +1,1037 @@ +/* + * EP93XX PATA controller driver. + * + * Copyright (c) 2012, Metasoft s.c. + * Rafal Prylowski <prylowski@metasoft.pl> + * + * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX + * PATA driver by Lennert Buytenhek and Alessandro Zummo. + * Read/Write timings, resource management and other improvements + * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz. + * DMA engine support based on spi-ep93xx.c by Mika Westerberg. + * + * Original copyrights: + * + * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs + * PATA host controller driver. + * + * Copyright (c) 2009, Bartlomiej Zolnierkiewicz + * + * Heavily based on the ep93xx-ide.c driver: + * + * Copyright (c) 2009, Joao Ramos <joao.ramos@inov.pt> + * INESC Inovacao (INOV) + * + * EP93XX PATA controller driver. + * Copyright (C) 2007 Lennert Buytenhek <buytenh@wantstofly.org> + * + * An ATA driver for the Cirrus Logic EP93xx PATA controller. + * + * Based on an earlier version by Alessandro Zummo, which is: + * Copyright (C) 2006 Tower Technologies + */ + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/ktime.h> + +#include <linux/platform_data/dma-ep93xx.h> +#include <mach/platform.h> + +#define DRV_NAME "ep93xx-ide" +#define DRV_VERSION "1.0" + +enum { + /* IDE Control Register */ + IDECTRL = 0x00, + IDECTRL_CS0N = (1 << 0), + IDECTRL_CS1N = (1 << 1), + IDECTRL_DIORN = (1 << 5), + IDECTRL_DIOWN = (1 << 6), + IDECTRL_INTRQ = (1 << 9), + IDECTRL_IORDY = (1 << 10), + /* + * the device IDE register to be accessed is selected through + * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N': + * b4 b3 b2 b1 b0 + * A2 A1 A0 CS1N CS0N + * the values filled in this structure allows the value to be directly + * ORed to the IDECTRL register, hence giving directly the A[2:0] and + * CS1N/CS0N values for each IDE register. 
+ * The values correspond to the transformation: + * ((real IDE address) << 2) | CS1N value << 1 | CS0N value + */ + IDECTRL_ADDR_CMD = 0 + 2, /* CS1 */ + IDECTRL_ADDR_DATA = (ATA_REG_DATA << 2) + 2, + IDECTRL_ADDR_ERROR = (ATA_REG_ERR << 2) + 2, + IDECTRL_ADDR_FEATURE = (ATA_REG_FEATURE << 2) + 2, + IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2, + IDECTRL_ADDR_LBAL = (ATA_REG_LBAL << 2) + 2, + IDECTRL_ADDR_LBAM = (ATA_REG_LBAM << 2) + 2, + IDECTRL_ADDR_LBAH = (ATA_REG_LBAH << 2) + 2, + IDECTRL_ADDR_DEVICE = (ATA_REG_DEVICE << 2) + 2, + IDECTRL_ADDR_STATUS = (ATA_REG_STATUS << 2) + 2, + IDECTRL_ADDR_COMMAND = (ATA_REG_CMD << 2) + 2, + IDECTRL_ADDR_ALTSTATUS = (0x06 << 2) + 1, /* CS0 */ + IDECTRL_ADDR_CTL = (0x06 << 2) + 1, /* CS0 */ + + /* IDE Configuration Register */ + IDECFG = 0x04, + IDECFG_IDEEN = (1 << 0), + IDECFG_PIO = (1 << 1), + IDECFG_MDMA = (1 << 2), + IDECFG_UDMA = (1 << 3), + IDECFG_MODE_SHIFT = 4, + IDECFG_MODE_MASK = (0xf << 4), + IDECFG_WST_SHIFT = 8, + IDECFG_WST_MASK = (0x3 << 8), + + /* MDMA Operation Register */ + IDEMDMAOP = 0x08, + + /* UDMA Operation Register */ + IDEUDMAOP = 0x0c, + IDEUDMAOP_UEN = (1 << 0), + IDEUDMAOP_RWOP = (1 << 1), + + /* PIO/MDMA/UDMA Data Registers */ + IDEDATAOUT = 0x10, + IDEDATAIN = 0x14, + IDEMDMADATAOUT = 0x18, + IDEMDMADATAIN = 0x1c, + IDEUDMADATAOUT = 0x20, + IDEUDMADATAIN = 0x24, + + /* UDMA Status Register */ + IDEUDMASTS = 0x28, + IDEUDMASTS_DMAIDE = (1 << 16), + IDEUDMASTS_INTIDE = (1 << 17), + IDEUDMASTS_SBUSY = (1 << 18), + IDEUDMASTS_NDO = (1 << 24), + IDEUDMASTS_NDI = (1 << 25), + IDEUDMASTS_N4X = (1 << 26), + + /* UDMA Debug Status Register */ + IDEUDMADEBUG = 0x2c, +}; + +struct ep93xx_pata_data { + const struct platform_device *pdev; + void __iomem *ide_base; + struct ata_timing t; + bool iordy; + + unsigned long udma_in_phys; + unsigned long udma_out_phys; + + struct dma_chan *dma_rx_channel; + struct ep93xx_dma_data dma_rx_data; + struct dma_chan *dma_tx_channel; + struct ep93xx_dma_data dma_tx_data; +}; + +static void ep93xx_pata_clear_regs(void __iomem *base) +{ + writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN | + IDECTRL_DIOWN, base + IDECTRL); + + writel(0, base + IDECFG); + writel(0, base + IDEMDMAOP); + writel(0, base + IDEUDMAOP); + writel(0, base + IDEDATAOUT); + writel(0, base + IDEDATAIN); + writel(0, base + IDEMDMADATAOUT); + writel(0, base + IDEMDMADATAIN); + writel(0, base + IDEUDMADATAOUT); + writel(0, base + IDEUDMADATAIN); + writel(0, base + IDEUDMADEBUG); +} + +static bool ep93xx_pata_check_iordy(void __iomem *base) +{ + return !!(readl(base + IDECTRL) & IDECTRL_IORDY); +} + +/* + * According to EP93xx User's Guide, WST field of IDECFG specifies number + * of HCLK cycles to hold the data bus after a PIO write operation. + * It should be programmed to guarantee following delays: + * + * PIO Mode [ns] + * 0 30 + * 1 20 + * 2 15 + * 3 10 + * 4 5 + * + * Maximum possible value for HCLK is 100MHz. + */ +static int ep93xx_pata_get_wst(int pio_mode) +{ + int val; + + if (pio_mode == 0) + val = 3; + else if (pio_mode < 3) + val = 2; + else + val = 1; + + return val << IDECFG_WST_SHIFT; +} + +static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode) +{ + writel(IDECFG_IDEEN | IDECFG_PIO | + ep93xx_pata_get_wst(pio_mode) | + (pio_mode << IDECFG_MODE_SHIFT), base + IDECFG); +} + +/* + * Based on delay loop found in mach-pxa/mp900.c. + * + * Single iteration should take 5 cpu cycles. This is 25ns assuming the + * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings + * than eg. 
20ns. + */ +static void ep93xx_pata_delay(unsigned long count) +{ + __asm__ volatile ( + "0:\n" + "mov r0, r0\n" + "subs %0, %1, #1\n" + "bge 0b\n" + : "=r" (count) + : "0" (count) + ); +} + +static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base, + unsigned long t2) +{ + /* + * According to ATA specification, IORDY pin can be first sampled + * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse + * width is tB = 1250ns. + * + * We are already t2 delay loop iterations after activation of + * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional + * delay loop iterations. + */ + unsigned long start = (1250 + 35) / 25 - t2; + unsigned long counter = start; + + while (!ep93xx_pata_check_iordy(base) && counter--) + ep93xx_pata_delay(1); + return start - counter; +} + +/* common part at start of ep93xx_pata_read/write() */ +static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr, + unsigned long t1) +{ + writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL); + ep93xx_pata_delay(t1); +} + +/* common part at end of ep93xx_pata_read/write() */ +static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr, + bool iordy, unsigned long t0, unsigned long t2, + unsigned long t2i) +{ + ep93xx_pata_delay(t2); + /* lengthen t2 if needed */ + if (iordy) + t2 += ep93xx_pata_wait_for_iordy(base, t2); + writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL); + if (t0 > t2 && t0 - t2 > t2i) + ep93xx_pata_delay(t0 - t2); + else + ep93xx_pata_delay(t2i); +} + +static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data, + unsigned long addr, + bool reg) +{ + void __iomem *base = drv_data->ide_base; + const struct ata_timing *t = &drv_data->t; + unsigned long t0 = reg ? t->cyc8b : t->cycle; + unsigned long t2 = reg ? t->act8b : t->active; + unsigned long t2i = reg ? t->rec8b : t->recover; + + ep93xx_pata_rw_begin(base, addr, t->setup); + writel(IDECTRL_DIOWN | addr, base + IDECTRL); + /* + * The IDEDATAIN register is loaded from the DD pins at the positive + * edge of the DIORN signal. (EP93xx UG p27-14) + */ + ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i); + return readl(base + IDEDATAIN); +} + +/* IDE register read */ +static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data, + unsigned long addr) +{ + return ep93xx_pata_read(drv_data, addr, true); +} + +/* PIO data read */ +static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data, + unsigned long addr) +{ + return ep93xx_pata_read(drv_data, addr, false); +} + +static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data, + u16 value, unsigned long addr, + bool reg) +{ + void __iomem *base = drv_data->ide_base; + const struct ata_timing *t = &drv_data->t; + unsigned long t0 = reg ? t->cyc8b : t->cycle; + unsigned long t2 = reg ? t->act8b : t->active; + unsigned long t2i = reg ? t->rec8b : t->recover; + + ep93xx_pata_rw_begin(base, addr, t->setup); + /* + * Value from IDEDATAOUT register is driven onto the DD pins when + * DIOWN is low. 
(EP93xx UG p27-13) + */ + writel(value, base + IDEDATAOUT); + writel(IDECTRL_DIORN | addr, base + IDECTRL); + ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i); +} + +/* IDE register write */ +static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data, + u16 value, unsigned long addr) +{ + ep93xx_pata_write(drv_data, value, addr, true); +} + +/* PIO data write */ +static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data, + u16 value, unsigned long addr) +{ + ep93xx_pata_write(drv_data, value, addr, false); +} + +static void ep93xx_pata_set_piomode(struct ata_port *ap, + struct ata_device *adev) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + struct ata_device *pair = ata_dev_pair(adev); + /* + * Calculate timings for the delay loop, assuming ep93xx cpu speed + * is 200MHz (maximum possible for ep93xx). If actual cpu speed is + * slower, we will wait a bit longer in each delay. + * Additional division of cpu speed by 5, because single iteration + * of our delay loop takes 5 cpu cycles (25ns). + */ + unsigned long T = 1000000 / (200 / 5); + + ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0); + if (pair && pair->pio_mode) { + struct ata_timing t; + ata_timing_compute(pair, pair->pio_mode, &t, T, 0); + ata_timing_merge(&t, &drv_data->t, &drv_data->t, + ATA_TIMING_SETUP | ATA_TIMING_8BIT); + } + drv_data->iordy = ata_pio_need_iordy(adev); + + ep93xx_pata_enable_pio(drv_data->ide_base, + adev->pio_mode - XFER_PIO_0); +} + +/* Note: original code is ata_sff_check_status */ +static u8 ep93xx_pata_check_status(struct ata_port *ap) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS); +} + +static u8 ep93xx_pata_check_altstatus(struct ata_port *ap) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS); +} + +/* Note: original code is ata_sff_tf_load */ +static void ep93xx_pata_tf_load(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + ep93xx_pata_write_reg(drv_data, tf->hob_feature, + IDECTRL_ADDR_FEATURE); + ep93xx_pata_write_reg(drv_data, tf->hob_nsect, + IDECTRL_ADDR_NSECT); + ep93xx_pata_write_reg(drv_data, tf->hob_lbal, + IDECTRL_ADDR_LBAL); + ep93xx_pata_write_reg(drv_data, tf->hob_lbam, + IDECTRL_ADDR_LBAM); + ep93xx_pata_write_reg(drv_data, tf->hob_lbah, + IDECTRL_ADDR_LBAH); + } + + if (is_addr) { + ep93xx_pata_write_reg(drv_data, tf->feature, + IDECTRL_ADDR_FEATURE); + ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT); + ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL); + ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM); + ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH); + } + + if (tf->flags & ATA_TFLAG_DEVICE) + ep93xx_pata_write_reg(drv_data, tf->device, + IDECTRL_ADDR_DEVICE); + + ata_wait_idle(ap); +} + +/* Note: original code is ata_sff_tf_read */ +static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + tf->command = ep93xx_pata_check_status(ap); + tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); 
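	/*
	 * Illustrative aside, not part of the patch: the reads below mirror
	 * the ata_sff_tf_read() convention.  The low-order taskfile bytes
	 * are latched first; for an LBA48 command the HOB bit is then set
	 * in the device control register, the same registers are read a
	 * second time to fetch the high-order ("hob_") bytes, and the
	 * original ctl value is written back so ap->last_ctl stays
	 * consistent.
	 */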
+ tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); + tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); + tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM); + tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH); + tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE); + + if (tf->flags & ATA_TFLAG_LBA48) { + ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB, + IDECTRL_ADDR_CTL); + tf->hob_feature = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_FEATURE); + tf->hob_nsect = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_NSECT); + tf->hob_lbal = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_LBAL); + tf->hob_lbam = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_LBAM); + tf->hob_lbah = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_LBAH); + ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL); + ap->last_ctl = tf->ctl; + } +} + +/* Note: original code is ata_sff_exec_command */ +static void ep93xx_pata_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + ep93xx_pata_write_reg(drv_data, tf->command, + IDECTRL_ADDR_COMMAND); + ata_sff_pause(ap); +} + +/* Note: original code is ata_sff_dev_select */ +static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + u8 tmp = ATA_DEVICE_OBS; + + if (device != 0) + tmp |= ATA_DEV1; + + ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE); + ata_sff_pause(ap); /* needed; also flushes, for mmio */ +} + +/* Note: original code is ata_sff_set_devctl */ +static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL); +} + +/* Note: original code is ata_sff_data_xfer */ +static unsigned int ep93xx_pata_data_xfer(struct ata_device *adev, + unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = adev->link->ap; + struct ep93xx_pata_data *drv_data = ap->host->private_data; + u16 *data = (u16 *)buf; + unsigned int words = buflen >> 1; + + /* Transfer multiple of 2 bytes */ + while (words--) + if (rw == READ) + *data++ = cpu_to_le16( + ep93xx_pata_read_data( + drv_data, IDECTRL_ADDR_DATA)); + else + ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++), + IDECTRL_ADDR_DATA); + + /* Transfer trailing 1 byte, if any. 
*/ + if (unlikely(buflen & 0x01)) { + unsigned char pad[2] = { }; + + buf += buflen - 1; + + if (rw == READ) { + *pad = cpu_to_le16( + ep93xx_pata_read_data( + drv_data, IDECTRL_ADDR_DATA)); + *buf = pad[0]; + } else { + pad[0] = *buf; + ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad), + IDECTRL_ADDR_DATA); + } + words++; + } + + return words << 1; +} + +/* Note: original code is ata_devchk */ +static bool ep93xx_pata_device_is_present(struct ata_port *ap, + unsigned int device) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + u8 nsect, lbal; + + ap->ops->sff_dev_select(ap, device); + + ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT); + ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL); + + ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT); + ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL); + + ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT); + ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL); + + nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); + lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return true; + + return false; +} + +/* Note: original code is ata_sff_wait_after_reset */ +static int ep93xx_pata_wait_after_reset(struct ata_link *link, + unsigned int devmask, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct ep93xx_pata_data *drv_data = ap->host->private_data; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + int rc, ret = 0; + + ata_msleep(ap, ATA_WAIT_AFTER_RESET); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + /* + * -ENODEV means the odd clown forgot the D7 pulldown resistor + * and TF status is 0xff, bail out on it too. + */ + if (rc) + return rc; + + /* + * if device 1 was found in ata_devchk, wait for register + * access briefly, then wait for BSY to clear. + */ + if (dev1) { + int i; + + ap->ops->sff_dev_select(ap, 1); + + /* + * Wait for register access. Some ATAPI devices fail + * to set nsect/lbal after reset, so don't waste too + * much time on it. We're gonna wait for !BSY anyway. + */ + for (i = 0; i < 2; i++) { + u8 nsect, lbal; + + nsect = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_NSECT); + lbal = ep93xx_pata_read_reg(drv_data, + IDECTRL_ADDR_LBAL); + if (nsect == 1 && lbal == 1) + break; + msleep(50); /* give drive a breather */ + } + + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + if (rc != -ENODEV) + return rc; + ret = rc; + } + } + /* is all this really necessary? 
*/ + ap->ops->sff_dev_select(ap, 0); + if (dev1) + ap->ops->sff_dev_select(ap, 1); + if (dev0) + ap->ops->sff_dev_select(ap, 0); + + return ret; +} + +/* Note: original code is ata_bus_softreset */ +static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask, + unsigned long deadline) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL); + udelay(20); /* FIXME: flush */ + ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL); + udelay(20); /* FIXME: flush */ + ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL); + ap->last_ctl = ap->ctl; + + return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline); +} + +static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data) +{ + if (drv_data->dma_rx_channel) { + dma_release_channel(drv_data->dma_rx_channel); + drv_data->dma_rx_channel = NULL; + } + if (drv_data->dma_tx_channel) { + dma_release_channel(drv_data->dma_tx_channel); + drv_data->dma_tx_channel = NULL; + } +} + +static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param) +{ + if (ep93xx_dma_chan_is_m2p(chan)) + return false; + + chan->private = filter_param; + return true; +} + +static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) +{ + const struct platform_device *pdev = drv_data->pdev; + dma_cap_mask_t mask; + struct dma_slave_config conf; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* + * Request two channels for IDE. Another possibility would be + * to request only one channel, and reprogram it's direction at + * start of new transfer. + */ + drv_data->dma_rx_data.port = EP93XX_DMA_IDE; + drv_data->dma_rx_data.direction = DMA_FROM_DEVICE; + drv_data->dma_rx_data.name = "ep93xx-pata-rx"; + drv_data->dma_rx_channel = dma_request_channel(mask, + ep93xx_pata_dma_filter, &drv_data->dma_rx_data); + if (!drv_data->dma_rx_channel) + return; + + drv_data->dma_tx_data.port = EP93XX_DMA_IDE; + drv_data->dma_tx_data.direction = DMA_TO_DEVICE; + drv_data->dma_tx_data.name = "ep93xx-pata-tx"; + drv_data->dma_tx_channel = dma_request_channel(mask, + ep93xx_pata_dma_filter, &drv_data->dma_tx_data); + if (!drv_data->dma_tx_channel) { + dma_release_channel(drv_data->dma_rx_channel); + return; + } + + /* Configure receive channel direction and source address */ + memset(&conf, 0, sizeof(conf)); + conf.direction = DMA_FROM_DEVICE; + conf.src_addr = drv_data->udma_in_phys; + conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) { + dev_err(&pdev->dev, "failed to configure rx dma channel\n"); + ep93xx_pata_release_dma(drv_data); + return; + } + + /* Configure transmit channel direction and destination address */ + memset(&conf, 0, sizeof(conf)); + conf.direction = DMA_TO_DEVICE; + conf.dst_addr = drv_data->udma_out_phys; + conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) { + dev_err(&pdev->dev, "failed to configure tx dma channel\n"); + ep93xx_pata_release_dma(drv_data); + } +} + +static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc) +{ + struct dma_async_tx_descriptor *txd; + struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; + void __iomem *base = drv_data->ide_base; + struct ata_device *adev = qc->dev; + u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0; + struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE + ? 
+ +static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc) +{ + struct dma_async_tx_descriptor *txd; + struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; + void __iomem *base = drv_data->ide_base; + struct ata_device *adev = qc->dev; + u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0; + struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE + ? drv_data->dma_tx_channel : drv_data->dma_rx_channel; + + txd = dmaengine_prep_slave_sg(channel, qc->sg, qc->n_elem, qc->dma_dir, + DMA_CTRL_ACK); + if (!txd) { + dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n"); + return; + } + txd->callback = NULL; + txd->callback_param = NULL; + + if (dmaengine_submit(txd) < 0) { + dev_err(qc->ap->dev, "failed to submit dma transfer\n"); + return; + } + dma_async_issue_pending(channel); + + /* + * When enabling UDMA operation, the IDEUDMAOP register needs to be + * programmed in a three-step sequence: + * 1) set or clear the RWOP bit, + * 2) perform a dummy read of the register, + * 3) set the UEN bit. + */ + writel(v, base + IDEUDMAOP); + readl(base + IDEUDMAOP); + writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP); + + writel(IDECFG_IDEEN | IDECFG_UDMA | + ((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT), + base + IDECFG); +} + +static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc) +{ + struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; + void __iomem *base = drv_data->ide_base; + + /* terminate all dma transfers, if not yet finished */ + dmaengine_terminate_all(drv_data->dma_rx_channel); + dmaengine_terminate_all(drv_data->dma_tx_channel); + + /* + * To properly stop IDE-DMA, the IDEUDMAOP register must be cleared + * and the IDECTRL register must be set to its default value. + */ + writel(0, base + IDEUDMAOP); + writel(readl(base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN | + IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL); + + ep93xx_pata_enable_pio(drv_data->ide_base, + qc->dev->pio_mode - XFER_PIO_0); + + ata_sff_dma_pause(qc->ap); +} + +static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc) +{ + qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); +}
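/*
 * For orientation, not part of this patch: libata drives the bmdma_*
 * hooks above roughly in this order for a DMA command (compare
 * ata_bmdma_qc_issue() in libata-sff).  The EP93xx has no PRD table,
 * so ->bmdma_setup() only issues the taskfile command, and all data
 * movement is programmed in ->bmdma_start() via the dmaengine API.
 */
static unsigned int example_dma_issue_flow(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ap->ops->sff_tf_load(ap, &qc->tf);	/* load taskfile registers */
	ap->ops->bmdma_setup(qc);		/* issue the command */
	ap->ops->bmdma_start(qc);		/* kick the UDMA engine */
	/* on IRQ: ->bmdma_status(); then ->bmdma_stop() at completion */
	return 0;
}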
+ +static u8 ep93xx_pata_dma_status(struct ata_port *ap) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + u32 val = readl(drv_data->ide_base + IDEUDMASTS); + + /* + * UDMA Status Register bits: + * + * DMAIDE - DMA request signal from UDMA state machine, + * INTIDE - INT line generated by UDMA because of errors in the + * state machine, + * SBUSY - UDMA state machine busy, not in idle state, + * NDO - error for data-out not completed, + * NDI - error for data-in not completed, + * N4X - error for data transferred not a multiple of four + * 32-bit words. + * (EP93xx UG p27-17) + */ + if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI || + val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE) + return ATA_DMA_ERR; + + /* read INTRQ (INT[3]) pin input state */ + if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ) + return ATA_DMA_INTR; + + if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE) + return ATA_DMA_ACTIVE; + + return 0; +} + +/* Note: original code is ata_sff_softreset */ +static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = al->ap; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + unsigned int devmask = 0; + int rc; + u8 err; + + /* determine if device 0/1 are present */ + if (ep93xx_pata_device_is_present(ap, 0)) + devmask |= (1 << 0); + if (slave_possible && ep93xx_pata_device_is_present(ap, 1)) + devmask |= (1 << 1); + + /* select device 0 again */ + ap->ops->sff_dev_select(al->ap, 0); + + /* issue bus reset */ + rc = ep93xx_pata_bus_softreset(ap, devmask, deadline); + /* if link is occupied, -ENODEV too is an error */ + if (rc && (rc != -ENODEV || sata_scr_valid(al))) { + ata_link_err(al, "SRST failed (errno=%d)\n", rc); + return rc; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0), + &err); + if (slave_possible && err != 0x81) + classes[1] = ata_sff_dev_classify(&al->device[1], + devmask & (1 << 1), &err); + + return 0; +} + +/* Note: original code is ata_sff_drain_fifo */ +static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc) +{ + int count; + struct ata_port *ap; + struct ep93xx_pata_data *drv_data; + + /* We only need to flush incoming data when a command was running */ + if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) + return; + + ap = qc->ap; + drv_data = ap->host->private_data; + /* Drain up to 64K of data before we give up this recovery method */ + for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) + && count < 65536; count += 2) + ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA); + + /* Can become DEBUG later */ + if (count) + ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count); + +} + +static int ep93xx_pata_port_start(struct ata_port *ap) +{ + struct ep93xx_pata_data *drv_data = ap->host->private_data; + + /* + * Set timings to safe values at startup (= number of ns from the ATA + * specification); we'll switch to properly calculated values later.
+ */ + drv_data->t = *ata_timing_find_mode(XFER_PIO_0); + return 0; +} + +static struct scsi_host_template ep93xx_pata_sht = { + ATA_BASE_SHT(DRV_NAME), + /* ep93xx dma implementation limit */ + .sg_tablesize = 32, + /* ep93xx dma can't transfer 65536 bytes at once */ + .dma_boundary = 0x7fff, +}; + +static struct ata_port_operations ep93xx_pata_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .qc_prep = ata_noop_qc_prep, + + .softreset = ep93xx_pata_softreset, + .hardreset = ATA_OP_NULL, + + .sff_dev_select = ep93xx_pata_dev_select, + .sff_set_devctl = ep93xx_pata_set_devctl, + .sff_check_status = ep93xx_pata_check_status, + .sff_check_altstatus = ep93xx_pata_check_altstatus, + .sff_tf_load = ep93xx_pata_tf_load, + .sff_tf_read = ep93xx_pata_tf_read, + .sff_exec_command = ep93xx_pata_exec_command, + .sff_data_xfer = ep93xx_pata_data_xfer, + .sff_drain_fifo = ep93xx_pata_drain_fifo, + .sff_irq_clear = ATA_OP_NULL, + + .set_piomode = ep93xx_pata_set_piomode, + + .bmdma_setup = ep93xx_pata_dma_setup, + .bmdma_start = ep93xx_pata_dma_start, + .bmdma_stop = ep93xx_pata_dma_stop, + .bmdma_status = ep93xx_pata_dma_status, + + .cable_detect = ata_cable_unknown, + .port_start = ep93xx_pata_port_start, +}; + +static int ep93xx_pata_probe(struct platform_device *pdev) +{ + struct ep93xx_pata_data *drv_data; + struct ata_host *host; + struct ata_port *ap; + int irq; + struct resource *mem_res; + void __iomem *ide_base; + int err; + + err = ep93xx_ide_acquire_gpio(pdev); + if (err) + return err; + + /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + err = -ENXIO; + goto err_rel_gpio; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ide_base = devm_ioremap_resource(&pdev->dev, mem_res); + if (IS_ERR(ide_base)) { + err = PTR_ERR(ide_base); + goto err_rel_gpio; + } + + drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) { + err = -ENXIO; + goto err_rel_gpio; + } + + platform_set_drvdata(pdev, drv_data); + drv_data->pdev = pdev; + drv_data->ide_base = ide_base; + drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN; + drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT; + ep93xx_pata_dma_init(drv_data); + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) { + err = -ENXIO; + goto err_rel_dma; + } + + ep93xx_pata_clear_regs(ide_base); + + host->private_data = drv_data; + + ap = host->ports[0]; + ap->dev = &pdev->dev; + ap->ops = &ep93xx_pata_port_ops; + ap->flags |= ATA_FLAG_SLAVE_POSS; + ap->pio_mask = ATA_PIO4; + + /* + * Maximum UDMA modes: + * EP931x rev.E0 - UDMA2 + * EP931x rev.E1 - UDMA3 + * EP931x rev.E2 - UDMA4 + * + * MWDMA support was removed from EP931x rev.E2, + * so this driver supports only UDMA modes. 
+ */ + if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) { + int chip_rev = ep93xx_chip_revision(); + + if (chip_rev == EP93XX_CHIP_REV_E1) + ap->udma_mask = ATA_UDMA3; + else if (chip_rev == EP93XX_CHIP_REV_E2) + ap->udma_mask = ATA_UDMA4; + else + ap->udma_mask = ATA_UDMA2; + } + + /* defaults, pio 0 */ + ep93xx_pata_enable_pio(ide_base, 0); + + dev_info(&pdev->dev, "version " DRV_VERSION "\n"); + + /* activate host */ + err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0, + &ep93xx_pata_sht); + if (err == 0) + return 0; + +err_rel_dma: + ep93xx_pata_release_dma(drv_data); +err_rel_gpio: + ep93xx_ide_release_gpio(pdev); + return err; +} + +static int ep93xx_pata_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct ep93xx_pata_data *drv_data = host->private_data; + + ata_host_detach(host); + ep93xx_pata_release_dma(drv_data); + ep93xx_pata_clear_regs(drv_data->ide_base); + ep93xx_ide_release_gpio(pdev); + return 0; +} + +static struct platform_driver ep93xx_pata_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ep93xx_pata_probe, + .remove = ep93xx_pata_remove, +}; + +module_platform_driver(ep93xx_pata_platform_driver); + +MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, " + "Bartlomiej Zolnierkiewicz, Rafal Prylowski"); +MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:pata_ep93xx"); diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index cf656ecbe50..cbc3de793d1 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -11,49 +11,45 @@ * * * TODO - * Maybe PLL mode - * Look into engine reset on timeout errors. Should not be - * required. + * Look into engine reset on timeout errors. Should not be required. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt366" -#define DRV_VERSION "0.5" +#define DRV_VERSION "0.6.11" struct hpt_clock { - u8 xfer_speed; + u8 xfer_mode; u32 timing; }; /* key for bus clock timings * bit - * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW - * DMA. cycles = value + 1 - * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW - * DMA. cycles = value + 1 - * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file + * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 4:7 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 8:11 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. - * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file + * 12:15 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. - * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer. - * during task file register access. - * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA - * xfer. - * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task + * 16:18 udma_cycle_time. Clock cycles for UDMA xfer? + * 19:21 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. + * 22:24 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. 
- * 28 UDMA enable - * 29 DMA enable - * 30 PIO_MST enable. if set, the chip is in bus master mode during - * PIO. + * 28 UDMA enable. + * 29 DMA enable. + * 30 PIO_MST enable. If set, the chip is in bus master mode during + * PIO xfer. * 31 FIFO enable. */ @@ -114,18 +110,45 @@ static const struct hpt_clock hpt366_25[] = { { 0, 0x01208585 } }; -static const char *bad_ata33[] = { - "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", - "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", - "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", +/** + * hpt36x_find_mode - find the hpt36x timing + * @ap: ATA port + * @speed: transfer mode + * + * Return the 32bit register programming information for this channel + * that matches the speed provided. + */ + +static u32 hpt36x_find_mode(struct ata_port *ap, int speed) +{ + struct hpt_clock *clocks = ap->host->private_data; + + while (clocks->xfer_mode) { + if (clocks->xfer_mode == speed) + return clocks->timing; + clocks++; + } + BUG(); + return 0xffffffffU; /* silence compiler warning */ +} + +static const char * const bad_ata33[] = { + "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", + "Maxtor 90845U3", "Maxtor 90650U2", + "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", + "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", + "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", + "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", - "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", - "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", + "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", + "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", + "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", + "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; -static const char *bad_ata66_4[] = { +static const char * const bad_ata66_4[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", @@ -144,31 +167,22 @@ static const char *bad_ata66_4[] = { NULL }; -static const char *bad_ata66_3[] = { +static const char * const bad_ata66_3[] = { "WDC AC310200R", NULL }; -static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) +static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, + const char * const list[]) { - unsigned char model_num[40]; - char *s; - unsigned int len; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i = 0; - ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); - s = &model_num[0]; - len = strnlen(s, sizeof(model_num)); + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); - /* ATAPI specifies that empty space is blank-filled; remove blanks */ - while ((len > 0) && (s[len - 1] == ' ')) { - len--; - s[len] = 0; - } - - while(list[i] != NULL) { - if (!strncmp(list[i], s, len)) { - printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", + while (list[i] != NULL) { + if (!strcmp(list[i], model_num)) { + pr_warn("%s is not supported for %s\n", modestr, list[i]); return 
1; } @@ -179,70 +193,66 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons /** * hpt366_filter - mode selection filter - * @ap: ATA interface * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt366_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) mask &= ~ATA_MASK_UDMA; if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) - mask &= ~(0x07 << ATA_SHIFT_UDMA); + mask &= ~(0xF8 << ATA_SHIFT_UDMA); if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) - mask &= ~(0x0F << ATA_SHIFT_UDMA); - } - return ata_pci_default_filter(ap, adev, mask); -} + mask &= ~(0xF0 << ATA_SHIFT_UDMA); + } else if (adev->class == ATA_DEV_ATAPI) + mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); -/** - * hpt36x_find_mode - reset the hpt36x bus - * @ap: ATA port - * @speed: transfer mode - * - * Return the 32bit register programming information for this channel - * that matches the speed provided. - */ - -static u32 hpt36x_find_mode(struct ata_port *ap, int speed) -{ - struct hpt_clock *clocks = ap->host->private_data; - - while(clocks->xfer_speed) { - if (clocks->xfer_speed == speed) - return clocks->timing; - clocks++; - } - BUG(); - return 0xffffffffU; /* silence compiler warning */ + return mask; } -static int hpt36x_pre_reset(struct ata_port *ap) +static int hpt36x_cable_detect(struct ata_port *ap) { - u8 ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 ata66; + /* + * Each channel of pata_hpt366 occupies separate PCI function + * as the primary channel and bit1 indicates the cable type. + */ pci_read_config_byte(pdev, 0x5A, &ata66); - if (ata66 & (1 << ap->port_no)) - ap->cbl = ATA_CBL_PATA40; - else - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); + if (ata66 & 2) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; } -/** - * hpt36x_error_handler - reset the hpt36x bus - * @ap: ATA port to reset - * - * Perform the reset handling for the 366/368 - */ - -static void hpt36x_error_handler(struct ata_port *ap) +static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev, + u8 mode) { - ata_bmdma_drive_eh(ap, hpt36x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u32 addr = 0x40 + 4 * adev->devno; + u32 mask, reg, t; + + /* determine timing mask and find matching clock entry */ + if (mode < XFER_MW_DMA_0) + mask = 0xc1f8ffff; + else if (mode < XFER_UDMA_0) + mask = 0x303800ff; + else + mask = 0x30070000; + + t = hpt36x_find_mode(ap, mode); + + /* + * Combine new mode bits with old config bits and disable + * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid + * problems handling I/O errors later. 
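/*
 * For illustration, not part of this patch: per the bit-layout key at
 * the top of this file, the timing word packs several fields.  A
 * hypothetical accessor shows the decoding; the driver itself only
 * ever splices whole fields using the masks chosen in
 * hpt366_set_mode() above.
 */
static inline u32 hpt36x_timing_field(u32 t, int shift, int width)
{
	return (t >> shift) & ((1U << width) - 1);
}
/*
 * e.g. the UDMA mask 0x30070000 covers exactly:
 *   hpt36x_timing_field(t, 16, 3) - udma_cycle_time (bits 16:18)
 *   bit 28                        - UDMA enable
 *   bit 29                        - DMA enable
 */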
+ */ + pci_read_config_dword(pdev, addr, ®); + reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000; + pci_write_config_dword(pdev, addr, reg); } /** @@ -255,28 +265,7 @@ static void hpt36x_error_handler(struct ata_port *ap) static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - if (fast & 0x80) { - fast &= ~0x80; - pci_write_config_byte(pdev, addr2, fast); - } - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt36x_find_mode(ap, adev->pio_mode); - mode &= ~0x8000000; /* No FIFO in PIO */ - mode &= ~0x30070000; /* Leave config bits alone */ - reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt366_set_mode(ap, adev, adev->pio_mode); } /** @@ -290,46 +279,11 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - if (fast & 0x80) { - fast &= ~0x80; - pci_write_config_byte(pdev, addr2, fast); - } - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt36x_find_mode(ap, adev->dma_mode); - mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ - mode &= ~0xC0000000; /* Leave config bits alone */ - reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt366_set_mode(ap, adev, adev->dma_mode); } static struct scsi_host_template hpt36x_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; /* @@ -337,39 +291,34 @@ static struct scsi_host_template hpt36x_sht = { */ static struct ata_port_operations hpt366_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .cable_detect = hpt36x_cable_detect, + .mode_filter = hpt366_filter, .set_piomode = hpt366_set_piomode, .set_dmamode = hpt366_set_dmamode, - .mode_filter = hpt366_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt36x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, +}; - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +/** + * hpt36x_init_chipset - common chip setup + * @dev: PCI device + * + * Perform the chip setup work that must be done at both init and + * resume 
time + */ - .data_xfer = ata_pio_data_xfer, +static void hpt36x_init_chipset(struct pci_dev *dev) +{ + u8 drive_fast; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); + pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); + pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; + pci_read_config_byte(dev, 0x51, &drive_fast); + if (drive_fast & 0x80) + pci_write_config_byte(dev, 0x51, drive_fast & ~0x80); +} /** * hpt36x_init_one - Initialise an HPT366/368 @@ -394,85 +343,84 @@ static struct ata_port_operations hpt366_port_ops = { static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info_hpt366 = { - .sht = &hpt36x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, + static const struct ata_port_info info_hpt366 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &hpt366_port_ops }; - struct ata_port_info *port_info[2] = {&info_hpt366, &info_hpt366}; + const struct ata_port_info *ppi[] = { &info_hpt366, NULL }; - u32 class_rev; + void *hpriv = NULL; u32 reg1; - u8 drive_fast; + int rc; - pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xFF; + rc = pcim_enable_device(dev); + if (rc) + return rc; /* May be a later chip in disguise. Check */ /* Newer chips are not in the HPT36x driver. Ignore them */ - if (class_rev > 2) - return -ENODEV; - - pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); - pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); - pci_write_config_byte(dev, PCI_MAX_LAT, 0x08); + if (dev->revision > 2) + return -ENODEV; - pci_read_config_byte(dev, 0x51, &drive_fast); - if (drive_fast & 0x80) - pci_write_config_byte(dev, 0x51, drive_fast & ~0x80); + hpt36x_init_chipset(dev); pci_read_config_dword(dev, 0x40, ®1); /* PCI clocking determines the ATA timing values to use */ /* info_hpt366 is safe against re-entry so we can scribble on it */ - switch(reg1 & 0x700) { - case 5: - info_hpt366.private_data = &hpt366_40; - break; - case 9: - info_hpt366.private_data = &hpt366_25; - break; - default: - info_hpt366.private_data = &hpt366_33; - break; + switch ((reg1 & 0x700) >> 8) { + case 9: + hpriv = &hpt366_40; + break; + case 5: + hpriv = &hpt366_25; + break; + default: + hpriv = &hpt366_33; + break; } /* Now kick off ATA set up */ - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); } -static struct pci_device_id hpt36x[] = { - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, - { 0, }, +#ifdef CONFIG_PM_SLEEP +static int hpt36x_reinit_one(struct pci_dev *dev) +{ + struct ata_host *host = pci_get_drvdata(dev); + int rc; + + rc = ata_pci_device_do_resume(dev); + if (rc) + return rc; + hpt36x_init_chipset(dev); + ata_host_resume(host); + return 0; +} +#endif + +static const struct pci_device_id hpt36x[] = { + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, + { }, }; static struct pci_driver hpt36x_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = hpt36x, - .probe = hpt36x_init_one, - .remove = ata_pci_remove_one + .probe = hpt36x_init_one, + .remove = ata_pci_remove_one, +#ifdef 
CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = hpt36x_reinit_one, +#endif }; -static int __init hpt36x_init(void) -{ - return pci_register_driver(&hpt36x_pci_driver); -} - - -static void __exit hpt36x_exit(void) -{ - pci_unregister_driver(&hpt36x_pci_driver); -} - +module_pci_driver(hpt36x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt36x); MODULE_VERSION(DRV_VERSION); - -module_init(hpt36x_init); -module_exit(hpt36x_exit); diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 10318c0012e..3ba843f5cdc 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -8,24 +8,24 @@ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc + * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * TODO - * PLL mode - * Look into engine reset on timeout errors. Should not be - * required. + * Look into engine reset on timeout errors. Should not be required. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt37x" -#define DRV_VERSION "0.5" +#define DRV_VERSION "0.6.23" struct hpt_clock { u8 xfer_speed; @@ -40,222 +40,95 @@ struct hpt_chip { /* key for bus clock timings * bit - * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW - * DMA. cycles = value + 1 - * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW - * DMA. cycles = value + 1 - * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file + * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. - * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file + * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. - * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer. - * during task file register access. - * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA - * xfer. - * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task + * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. + * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. + * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. + * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. - * 28 UDMA enable - * 29 DMA enable - * 30 PIO_MST enable. if set, the chip is in bus master mode during - * PIO. - * 31 FIFO enable. + * 28 UDMA enable. + * 29 DMA enable. + * 30 PIO_MST enable. If set, the chip is in bus master mode during + * PIO xfer. + * 31 FIFO enable. Only for PIO. */ -/* from highpoint documentation. 
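/*
 * Aside, for illustration: the module_pci_driver() line above generates,
 * in effect, exactly the registration boilerplate this patch deletes
 * (paraphrased from the helper macro in <linux/pci.h>):
 *
 *	static int __init hpt36x_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&hpt36x_pci_driver);
 *	}
 *	module_init(hpt36x_pci_driver_init);
 *
 *	static void __exit hpt36x_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&hpt36x_pci_driver);
 *	}
 *	module_exit(hpt36x_pci_driver_exit);
 */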
these are old values */ -static const struct hpt_clock hpt370_timings_33[] = { -/* { XFER_UDMA_5, 0x1A85F442, 0x16454e31 }, */ - { XFER_UDMA_5, 0x16454e31 }, - { XFER_UDMA_4, 0x16454e31 }, - { XFER_UDMA_3, 0x166d4e31 }, - { XFER_UDMA_2, 0x16494e31 }, - { XFER_UDMA_1, 0x164d4e31 }, - { XFER_UDMA_0, 0x16514e31 }, - - { XFER_MW_DMA_2, 0x26514e21 }, - { XFER_MW_DMA_1, 0x26514e33 }, - { XFER_MW_DMA_0, 0x26514e97 }, - - { XFER_PIO_4, 0x06514e21 }, - { XFER_PIO_3, 0x06514e22 }, - { XFER_PIO_2, 0x06514e33 }, - { XFER_PIO_1, 0x06914e43 }, - { XFER_PIO_0, 0x06914e57 }, - { 0, 0x06514e57 } -}; - -static const struct hpt_clock hpt370_timings_66[] = { - { XFER_UDMA_5, 0x14846231 }, - { XFER_UDMA_4, 0x14886231 }, - { XFER_UDMA_3, 0x148c6231 }, - { XFER_UDMA_2, 0x148c6231 }, - { XFER_UDMA_1, 0x14906231 }, - { XFER_UDMA_0, 0x14986231 }, - - { XFER_MW_DMA_2, 0x26514e21 }, - { XFER_MW_DMA_1, 0x26514e33 }, - { XFER_MW_DMA_0, 0x26514e97 }, - - { XFER_PIO_4, 0x06514e21 }, - { XFER_PIO_3, 0x06514e22 }, - { XFER_PIO_2, 0x06514e33 }, - { XFER_PIO_1, 0x06914e43 }, - { XFER_PIO_0, 0x06914e57 }, - { 0, 0x06514e57 } -}; - -/* these are the current (4 sep 2001) timings from highpoint */ -static const struct hpt_clock hpt370a_timings_33[] = { - { XFER_UDMA_5, 0x12446231 }, - { XFER_UDMA_4, 0x12446231 }, - { XFER_UDMA_3, 0x126c6231 }, - { XFER_UDMA_2, 0x12486231 }, - { XFER_UDMA_1, 0x124c6233 }, - { XFER_UDMA_0, 0x12506297 }, - - { XFER_MW_DMA_2, 0x22406c31 }, - { XFER_MW_DMA_1, 0x22406c33 }, - { XFER_MW_DMA_0, 0x22406c97 }, - - { XFER_PIO_4, 0x06414e31 }, - { XFER_PIO_3, 0x06414e42 }, - { XFER_PIO_2, 0x06414e53 }, - { XFER_PIO_1, 0x06814e93 }, - { XFER_PIO_0, 0x06814ea7 }, - { 0, 0x06814ea7 } -}; - -/* 2x 33MHz timings */ -static const struct hpt_clock hpt370a_timings_66[] = { - { XFER_UDMA_5, 0x1488e673 }, - { XFER_UDMA_4, 0x1488e673 }, - { XFER_UDMA_3, 0x1498e673 }, - { XFER_UDMA_2, 0x1490e673 }, - { XFER_UDMA_1, 0x1498e677 }, - { XFER_UDMA_0, 0x14a0e73f }, - - { XFER_MW_DMA_2, 0x2480fa73 }, - { XFER_MW_DMA_1, 0x2480fa77 }, - { XFER_MW_DMA_0, 0x2480fb3f }, - - { XFER_PIO_4, 0x0c82be73 }, - { XFER_PIO_3, 0x0c82be95 }, - { XFER_PIO_2, 0x0c82beb7 }, - { XFER_PIO_1, 0x0d02bf37 }, - { XFER_PIO_0, 0x0d02bf5f }, - { 0, 0x0d02bf5f } +static struct hpt_clock hpt37x_timings_33[] = { + { XFER_UDMA_6, 0x12446231 }, /* 0x12646231 ?? 
*/ + { XFER_UDMA_5, 0x12446231 }, + { XFER_UDMA_4, 0x12446231 }, + { XFER_UDMA_3, 0x126c6231 }, + { XFER_UDMA_2, 0x12486231 }, + { XFER_UDMA_1, 0x124c6233 }, + { XFER_UDMA_0, 0x12506297 }, + + { XFER_MW_DMA_2, 0x22406c31 }, + { XFER_MW_DMA_1, 0x22406c33 }, + { XFER_MW_DMA_0, 0x22406c97 }, + + { XFER_PIO_4, 0x06414e31 }, + { XFER_PIO_3, 0x06414e42 }, + { XFER_PIO_2, 0x06414e53 }, + { XFER_PIO_1, 0x06814e93 }, + { XFER_PIO_0, 0x06814ea7 } }; -static const struct hpt_clock hpt370a_timings_50[] = { - { XFER_UDMA_5, 0x12848242 }, - { XFER_UDMA_4, 0x12ac8242 }, - { XFER_UDMA_3, 0x128c8242 }, - { XFER_UDMA_2, 0x120c8242 }, - { XFER_UDMA_1, 0x12148254 }, - { XFER_UDMA_0, 0x121882ea }, - - { XFER_MW_DMA_2, 0x22808242 }, - { XFER_MW_DMA_1, 0x22808254 }, - { XFER_MW_DMA_0, 0x228082ea }, - - { XFER_PIO_4, 0x0a81f442 }, - { XFER_PIO_3, 0x0a81f443 }, - { XFER_PIO_2, 0x0a81f454 }, - { XFER_PIO_1, 0x0ac1f465 }, - { XFER_PIO_0, 0x0ac1f48a }, - { 0, 0x0ac1f48a } +static struct hpt_clock hpt37x_timings_50[] = { + { XFER_UDMA_6, 0x12848242 }, + { XFER_UDMA_5, 0x12848242 }, + { XFER_UDMA_4, 0x12ac8242 }, + { XFER_UDMA_3, 0x128c8242 }, + { XFER_UDMA_2, 0x120c8242 }, + { XFER_UDMA_1, 0x12148254 }, + { XFER_UDMA_0, 0x121882ea }, + + { XFER_MW_DMA_2, 0x22808242 }, + { XFER_MW_DMA_1, 0x22808254 }, + { XFER_MW_DMA_0, 0x228082ea }, + + { XFER_PIO_4, 0x0a81f442 }, + { XFER_PIO_3, 0x0a81f443 }, + { XFER_PIO_2, 0x0a81f454 }, + { XFER_PIO_1, 0x0ac1f465 }, + { XFER_PIO_0, 0x0ac1f48a } }; -static const struct hpt_clock hpt372_timings_33[] = { - { XFER_UDMA_6, 0x1c81dc62 }, - { XFER_UDMA_5, 0x1c6ddc62 }, - { XFER_UDMA_4, 0x1c8ddc62 }, - { XFER_UDMA_3, 0x1c8edc62 }, /* checkme */ - { XFER_UDMA_2, 0x1c91dc62 }, - { XFER_UDMA_1, 0x1c9adc62 }, /* checkme */ - { XFER_UDMA_0, 0x1c82dc62 }, /* checkme */ - - { XFER_MW_DMA_2, 0x2c829262 }, - { XFER_MW_DMA_1, 0x2c829266 }, /* checkme */ - { XFER_MW_DMA_0, 0x2c82922e }, /* checkme */ - - { XFER_PIO_4, 0x0c829c62 }, - { XFER_PIO_3, 0x0c829c84 }, - { XFER_PIO_2, 0x0c829ca6 }, - { XFER_PIO_1, 0x0d029d26 }, - { XFER_PIO_0, 0x0d029d5e }, - { 0, 0x0d029d5e } +static struct hpt_clock hpt37x_timings_66[] = { + { XFER_UDMA_6, 0x1c869c62 }, + { XFER_UDMA_5, 0x1cae9c62 }, /* 0x1c8a9c62 */ + { XFER_UDMA_4, 0x1c8a9c62 }, + { XFER_UDMA_3, 0x1c8e9c62 }, + { XFER_UDMA_2, 0x1c929c62 }, + { XFER_UDMA_1, 0x1c9a9c62 }, + { XFER_UDMA_0, 0x1c829c62 }, + + { XFER_MW_DMA_2, 0x2c829c62 }, + { XFER_MW_DMA_1, 0x2c829c66 }, + { XFER_MW_DMA_0, 0x2c829d2e }, + + { XFER_PIO_4, 0x0c829c62 }, + { XFER_PIO_3, 0x0c829c84 }, + { XFER_PIO_2, 0x0c829ca6 }, + { XFER_PIO_1, 0x0d029d26 }, + { XFER_PIO_0, 0x0d029d5e } }; -static const struct hpt_clock hpt372_timings_50[] = { - { XFER_UDMA_5, 0x12848242 }, - { XFER_UDMA_4, 0x12ac8242 }, - { XFER_UDMA_3, 0x128c8242 }, - { XFER_UDMA_2, 0x120c8242 }, - { XFER_UDMA_1, 0x12148254 }, - { XFER_UDMA_0, 0x121882ea }, - - { XFER_MW_DMA_2, 0x22808242 }, - { XFER_MW_DMA_1, 0x22808254 }, - { XFER_MW_DMA_0, 0x228082ea }, - - { XFER_PIO_4, 0x0a81f442 }, - { XFER_PIO_3, 0x0a81f443 }, - { XFER_PIO_2, 0x0a81f454 }, - { XFER_PIO_1, 0x0ac1f465 }, - { XFER_PIO_0, 0x0ac1f48a }, - { 0, 0x0a81f443 } -}; - -static const struct hpt_clock hpt372_timings_66[] = { - { XFER_UDMA_6, 0x1c869c62 }, - { XFER_UDMA_5, 0x1cae9c62 }, - { XFER_UDMA_4, 0x1c8a9c62 }, - { XFER_UDMA_3, 0x1c8e9c62 }, - { XFER_UDMA_2, 0x1c929c62 }, - { XFER_UDMA_1, 0x1c9a9c62 }, - { XFER_UDMA_0, 0x1c829c62 }, - - { XFER_MW_DMA_2, 0x2c829c62 }, - { XFER_MW_DMA_1, 0x2c829c66 }, - { XFER_MW_DMA_0, 0x2c829d2e }, - - { XFER_PIO_4, 0x0c829c62 
}, - { XFER_PIO_3, 0x0c829c84 }, - { XFER_PIO_2, 0x0c829ca6 }, - { XFER_PIO_1, 0x0d029d26 }, - { XFER_PIO_0, 0x0d029d5e }, - { 0, 0x0d029d26 } -}; - -static const struct hpt_clock hpt374_timings_33[] = { - { XFER_UDMA_6, 0x12808242 }, - { XFER_UDMA_5, 0x12848242 }, - { XFER_UDMA_4, 0x12ac8242 }, - { XFER_UDMA_3, 0x128c8242 }, - { XFER_UDMA_2, 0x120c8242 }, - { XFER_UDMA_1, 0x12148254 }, - { XFER_UDMA_0, 0x121882ea }, - - { XFER_MW_DMA_2, 0x22808242 }, - { XFER_MW_DMA_1, 0x22808254 }, - { XFER_MW_DMA_0, 0x228082ea }, - - { XFER_PIO_4, 0x0a81f442 }, - { XFER_PIO_3, 0x0a81f443 }, - { XFER_PIO_2, 0x0a81f454 }, - { XFER_PIO_1, 0x0ac1f465 }, - { XFER_PIO_0, 0x0ac1f48a }, - { 0, 0x06814e93 } -}; static const struct hpt_chip hpt370 = { "HPT370", 48, { - hpt370_timings_33, + hpt37x_timings_33, NULL, NULL, - hpt370_timings_66 + NULL } }; @@ -263,10 +136,10 @@ static const struct hpt_chip hpt370a = { "HPT370A", 48, { - hpt370a_timings_33, + hpt37x_timings_33, NULL, - hpt370a_timings_50, - hpt370a_timings_66 + hpt37x_timings_50, + NULL } }; @@ -274,10 +147,10 @@ static const struct hpt_chip hpt372 = { "HPT372", 55, { - hpt372_timings_33, + hpt37x_timings_33, NULL, - hpt372_timings_50, - hpt372_timings_66 + hpt37x_timings_50, + hpt37x_timings_66 } }; @@ -285,10 +158,10 @@ static const struct hpt_chip hpt302 = { "HPT302", 66, { - hpt372_timings_33, + hpt37x_timings_33, NULL, - hpt372_timings_50, - hpt372_timings_66 + hpt37x_timings_50, + hpt37x_timings_66 } }; @@ -296,10 +169,10 @@ static const struct hpt_chip hpt371 = { "HPT371", 66, { - hpt372_timings_33, + hpt37x_timings_33, NULL, - hpt372_timings_50, - hpt372_timings_66 + hpt37x_timings_50, + hpt37x_timings_66 } }; @@ -307,10 +180,10 @@ static const struct hpt_chip hpt372a = { "HPT372A", 66, { - hpt372_timings_33, + hpt37x_timings_33, NULL, - hpt372_timings_50, - hpt372_timings_66 + hpt37x_timings_50, + hpt37x_timings_66 } }; @@ -318,7 +191,7 @@ static const struct hpt_chip hpt374 = { "HPT374", 48, { - hpt374_timings_33, + hpt37x_timings_33, NULL, NULL, NULL @@ -338,7 +211,7 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = ap->host->private_data; - while(clocks->xfer_speed) { + while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; @@ -347,27 +220,17 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed) return 0xffffffffU; /* silence compiler warning */ } -static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) +static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, + const char * const list[]) { - unsigned char model_num[40]; - char *s; - unsigned int len; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i = 0; - ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, - sizeof(model_num)); - s = &model_num[0]; - len = strnlen(s, sizeof(model_num)); + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); - /* ATAPI specifies that empty space is blank-filled; remove blanks */ - while ((len > 0) && (s[len - 1] == ' ')) { - len--; - s[len] = 0; - } - - while(list[i] != NULL) { - if (!strncmp(list[i], s, len)) { - printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", + while (list[i] != NULL) { + if (!strcmp(list[i], model_num)) { + pr_warn("%s is not supported for %s\n", modestr, list[i]); return 1; } @@ -376,18 +239,23 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons return 0; } -static const char *bad_ata33[] = { - "Maxtor 92720U8", "Maxtor 
92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", - "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", - "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", +static const char * const bad_ata33[] = { + "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", + "Maxtor 90845U3", "Maxtor 90650U2", + "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", + "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", + "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", + "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", "Maxtor 90510D4", "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", - "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", - "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", + "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", + "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", + "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", + "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", NULL }; -static const char *bad_ata100_5[] = { +static const char * const bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", "IBM-DTLA-307045", @@ -408,149 +276,143 @@ static const char *bad_ata100_5[] = { /** * hpt370_filter - mode selection filter - * @ap: ATA interface * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) { - if (adev->class != ATA_DEV_ATA) { + if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) mask &= ~ATA_MASK_UDMA; if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) - mask &= ~(0x1F << ATA_SHIFT_UDMA); + mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_pci_default_filter(ap, adev, mask); + return mask; } /** * hpt370a_filter - mode selection filter - * @ap: ATA interface * @adev: ATA device * * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt370a_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) { - if (adev->class != ATA_DEV_ATA) { + if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) - mask &= ~ (0x1F << ATA_SHIFT_UDMA); + mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_pci_default_filter(ap, adev, mask); + return mask; } /** - * hpt37x_pre_reset - reset the hpt37x bus - * @ap: ATA port to reset + * hpt372_filter - mode selection filter + * @adev: ATA device + * @mask: mode mask + * + * The Marvell bridge chips used on the HighPoint SATA cards do not seem + * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
+ */ +static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask) +{ + if (ata_id_is_sata(adev->id)) + mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); + + return mask; +} + +/** + * hpt37x_cable_detect - Detect the cable type + * @ap: ATA port to detect on * - * Perform the initial reset handling for the 370/372 and 374 func 0 + * Return the cable type attached to this port */ -static int hpt37x_pre_reset(struct ata_port *ap) +static int hpt37x_cable_detect(struct ata_port *ap) { - u8 scr2, ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 scr2, ata66; pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); + + udelay(10); /* debounce */ + /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); - if (ata66 & (1 << ap->port_no)) - ap->cbl = ATA_CBL_PATA40; + if (ata66 & (2 >> ap->port_no)) + return ATA_CBL_PATA40; else - ap->cbl = ATA_CBL_PATA80; - - /* Reset the state machine */ - pci_write_config_byte(pdev, 0x50, 0x37); - pci_write_config_byte(pdev, 0x54, 0x37); - udelay(100); - - return ata_std_prereset(ap); + return ATA_CBL_PATA80; } /** - * hpt37x_error_handler - reset the hpt374 - * @ap: ATA port to reset + * hpt374_fn1_cable_detect - Detect the cable type + * @ap: ATA port to detect on * - * Perform probe for HPT37x, except for HPT374 channel 2 + * Return the cable type attached to this port */ -static void hpt37x_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - -static int hpt374_pre_reset(struct ata_port *ap) +static int hpt374_fn1_cable_detect(struct ata_port *ap) { - u16 mcr3, mcr6; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned int mcrbase = 0x50 + 4 * ap->port_no; + u16 mcr3; u8 ata66; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Do the extra channel work */ - pci_read_config_word(pdev, 0x52, &mcr3); - pci_read_config_word(pdev, 0x56, &mcr6); - /* Set bit 15 of 0x52 to enable TCBLID as input - Set bit 15 of 0x56 to enable FCBLID as input - */ - pci_write_config_word(pdev, 0x52, mcr3 | 0x8000); - pci_write_config_word(pdev, 0x56, mcr6 | 0x8000); + pci_read_config_word(pdev, mcrbase + 2, &mcr3); + /* Set bit 15 of 0x52 to enable TCBLID as input */ + pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000); pci_read_config_byte(pdev, 0x5A, &ata66); /* Reset TCBLID/FCBLID to output */ - pci_write_config_word(pdev, 0x52, mcr3); - pci_write_config_word(pdev, 0x56, mcr6); + pci_write_config_word(pdev, mcrbase + 2, mcr3); - if (ata66 & (1 << ap->port_no)) - ap->cbl = ATA_CBL_PATA40; + if (ata66 & (2 >> ap->port_no)) + return ATA_CBL_PATA40; else - ap->cbl = ATA_CBL_PATA80; - - /* Reset the state machine */ - pci_write_config_byte(pdev, 0x50, 0x37); - pci_write_config_byte(pdev, 0x54, 0x37); - udelay(100); - - return ata_std_prereset(ap); + return ATA_CBL_PATA80; } /** - * hpt374_error_handler - reset the hpt374 - * @classes: + * hpt37x_pre_reset - reset the hpt37x bus + * @link: ATA link to reset + * @deadline: deadline jiffies for the operation * - * The 374 cable detect is a little different due to the extra - * channels. The function 0 channels work like usual but function 1 - * is special + * Perform the initial reset handling for the HPT37x. 
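/*
 * For reference, not part of this patch: the (2 >> ap->port_no) test
 * in the cable-detect routines above selects the per-port cable bit:
 *
 *	port 0:	2 >> 0 == 0x02	(bit 1)
 *	port 1:	2 >> 1 == 0x01	(bit 0)
 *
 * A set bit means a 40-wire cable was sensed, limiting the port to
 * ATA_CBL_PATA40; otherwise an 80-wire cable is assumed.
 */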
*/ -static void hpt374_error_handler(struct ata_port *ap) +static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); + static const struct pci_bits hpt37x_enable_bits[] = { + { 0x50, 1, 0x04, 0x04 }, + { 0x54, 1, 0x04, 0x04 } + }; - if (!(PCI_FUNC(pdev->devfn) & 1)) - hpt37x_error_handler(ap); - else - ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} + if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) + return -ENOENT; -/** - * hpt370_set_piomode - PIO setup - * @ap: ATA interface - * @adev: device on the interface - * - * Perform PIO mode setup. - */ + /* Reset the state machine */ + pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); + udelay(100); -static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) + return ata_sff_prereset(link, deadline); +} + +static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev, + u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; - u32 reg; - u32 mode; + u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); @@ -562,63 +424,44 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) fast |= 0x01; pci_write_config_byte(pdev, addr2, fast); + /* Determine timing mask and find matching mode entry */ + if (mode < XFER_MW_DMA_0) + mask = 0xcfc3ffff; + else if (mode < XFER_UDMA_0) + mask = 0x31c001ff; + else + mask = 0x303c0000; + + timing = hpt37x_find_mode(ap, mode); + pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->pio_mode); - mode &= ~0x8000000; /* No FIFO in PIO */ - mode &= ~0x30070000; /* Leave config bits alone */ - reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + reg = (reg & ~mask) | (timing & mask); + pci_write_config_dword(pdev, addr1, reg); } - /** - * hpt370_set_dmamode - DMA timing setup + * hpt370_set_piomode - PIO setup * @ap: ATA interface - * @adev: Device being configured + * @adev: device on the interface * - * Set up the channel for MWDMA or UDMA modes. Much the same as with - * PIO, load the mode number and then set MWDMA or UDMA flag. + * Perform PIO mode setup. */ -static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) +static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - fast &= ~0x02; - fast |= 0x01; - pci_write_config_byte(pdev, addr2, fast); - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->dma_mode); - mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ - mode &= ~0xC0000000; /* Leave config bits alone */ - reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt370_set_mode(ap, adev, adev->pio_mode); } /** - * hpt370_bmdma_start - DMA engine begin - * @qc: ATA command + * hpt370_set_dmamode - DMA timing setup + * @ap: ATA interface + * @adev: Device being configured * - * The 370 and 370A want us to reset the DMA engine each time we - * use it. The 372 and later are fine. + * Set up the channel for MWDMA or UDMA modes. 
*/ -static void hpt370_bmdma_start(struct ata_queued_cmd *qc) +static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - struct ata_port *ap = qc->ap; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); - udelay(10); - ata_bmdma_start(qc); + hpt370_set_mode(ap, adev, adev->dma_mode); } /** @@ -632,24 +475,25 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 dma_stat = inb(ap->ioaddr.bmdma_addr + 2); + void __iomem *bmdma = ap->ioaddr.bmdma_addr; + u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS); u8 dma_cmd; - unsigned long bmdma = ap->ioaddr.bmdma_addr; - if (dma_stat & 0x01) { + if (dma_stat & ATA_DMA_ACTIVE) { udelay(20); - dma_stat = inb(bmdma + 2); + dma_stat = ioread8(bmdma + ATA_DMA_STATUS); } - if (dma_stat & 0x01) { + if (dma_stat & ATA_DMA_ACTIVE) { /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); /* Stop DMA */ - dma_cmd = inb(bmdma ); - outb(dma_cmd & 0xFE, bmdma); + dma_cmd = ioread8(bmdma + ATA_DMA_CMD); + iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD); /* Clear Error */ - dma_stat = inb(bmdma + 2); - outb(dma_stat | 0x06 , bmdma + 2); + dma_stat = ioread8(bmdma + ATA_DMA_STATUS); + iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, + bmdma + ATA_DMA_STATUS); /* Clear the engine */ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); udelay(10); @@ -657,20 +501,12 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc) ata_bmdma_stop(qc); } -/** - * hpt372_set_piomode - PIO setup - * @ap: ATA interface - * @adev: device on the interface - * - * Perform PIO mode setup. - */ - -static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) +static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev, + u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; - u32 reg; - u32 mode; + u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); @@ -681,14 +517,32 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); + /* Determine timing mask and find matching mode entry */ + if (mode < XFER_MW_DMA_0) + mask = 0xcfc3ffff; + else if (mode < XFER_UDMA_0) + mask = 0x31c001ff; + else + mask = 0x303c0000; + + timing = hpt37x_find_mode(ap, mode); + pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->pio_mode); + reg = (reg & ~mask) | (timing & mask); + pci_write_config_dword(pdev, addr1, reg); +} + +/** + * hpt372_set_piomode - PIO setup + * @ap: ATA interface + * @adev: device on the interface + * + * Perform PIO mode setup. + */ - printk("Find mode for %d reports %X\n", adev->pio_mode, mode); - mode &= ~0x80000000; /* No FIFO in PIO */ - mode &= ~0x30070000; /* Leave config bits alone */ - reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); +static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + hpt372_set_mode(ap, adev, adev->pio_mode); } /** @@ -696,33 +550,12 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) * @ap: ATA interface * @adev: Device being configured * - * Set up the channel for MWDMA or UDMA modes. Much the same as with - * PIO, load the mode number and then set MWDMA or UDMA flag. + * Set up the channel for MWDMA or UDMA modes. 
*/ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - fast &= ~0x07; - pci_write_config_byte(pdev, addr2, fast); - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt37x_find_mode(ap, adev->dma_mode); - printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode); - mode &= ~0xC0000000; /* Leave config bits alone */ - mode |= 0x80000000; /* FIFO in MWDMA or UDMA */ - reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt372_set_mode(ap, adev, adev->dma_mode); } /** @@ -736,7 +569,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int mscreg = 0x50 + 2 * ap->port_no; + int mscreg = 0x50 + 4 * ap->port_no; u8 bwsr_stat, msc_stat; pci_read_config_byte(pdev, 0x6A, &bwsr_stat); @@ -748,21 +581,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) static struct scsi_host_template hpt37x_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; /* @@ -770,38 +589,15 @@ static struct scsi_host_template hpt37x_sht = { */ static struct ata_port_operations hpt370_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt370_set_piomode, - .set_dmamode = hpt370_set_dmamode, - .mode_filter = hpt370_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + .inherits = &ata_bmdma_port_ops, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt37x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = hpt370_bmdma_start, .bmdma_stop = hpt370_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .mode_filter = hpt370_filter, + .cable_detect = hpt37x_cable_detect, + .set_piomode = hpt370_set_piomode, + .set_dmamode = hpt370_set_dmamode, + .prereset = hpt37x_pre_reset, }; /* @@ -809,122 +605,48 @@ static struct ata_port_operations hpt370_port_ops = { */ static struct ata_port_operations hpt370a_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt370_set_piomode, - .set_dmamode = hpt370_set_dmamode, + .inherits = &hpt370_port_ops, .mode_filter = hpt370a_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - 
.error_handler = hpt37x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = hpt370_bmdma_start, - .bmdma_stop = hpt370_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /* - * Configuration for HPT372, HPT371, HPT302. Slightly different PIO - * and DMA mode setting functionality. + * Configuration for HPT371 and HPT302. Slightly different PIO and DMA + * mode setting functionality. */ -static struct ata_port_operations hpt372_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt372_set_piomode, - .set_dmamode = hpt372_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, +static struct ata_port_operations hpt302_port_ops = { + .inherits = &ata_bmdma_port_ops, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt37x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, .bmdma_stop = hpt37x_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .cable_detect = hpt37x_cable_detect, + .set_piomode = hpt372_set_piomode, + .set_dmamode = hpt372_set_dmamode, + .prereset = hpt37x_pre_reset, }; /* - * Configuration for HPT374. Mode setting works like 372 and friends - * but we have a different cable detection procedure. + * Configuration for HPT372. Mode setting works like 371 and 302 + * but we have a mode filter. */ -static struct ata_port_operations hpt374_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt372_set_piomode, - .set_dmamode = hpt372_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt374_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = hpt37x_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, +static struct ata_port_operations hpt372_port_ops = { + .inherits = &hpt302_port_ops, + .mode_filter = hpt372_filter, +}; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +/* + * Configuration for HPT374. Mode setting and filtering works like 372 + * but we have a different cable detection procedure for function 1. 
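/*
 * For orientation, not part of this patch: with .inherits, any hook
 * left unset is filled in from the parent when the host is registered,
 * so the chain built above resolves to:
 *
 *	ata_bmdma_port_ops	  generic SFF/BMDMA operations
 *	-> hpt302_port_ops	  + hpt37x_bmdma_stop, cable detect,
 *				    PIO/DMA mode setting, prereset
 *	-> hpt372_port_ops	  + hpt372_filter
 *	-> hpt374_fn1_port_ops	  + hpt374_fn1_cable_detect
 */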
+ */ - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct ata_port_operations hpt374_fn1_port_ops = { + .inherits = &hpt372_port_ops, + .cable_detect = hpt374_fn1_cable_detect, };
/** - * htp37x_clock_slot - Turn timing to PC clock entry + * hpt37x_clock_slot - Turn timing to PC clock entry * @freq: Reported frequency timing * @base: Base timing *
@@ -958,12 +680,12 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev) u32 reg5c; int tries; - for(tries = 0; tries < 0x5000; tries++) { + for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ - for(tries = 0; tries < 0x1000; tries ++) { + for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ? */ if ((reg5b & 0x80) == 0)
@@ -971,13 +693,34 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev) } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); - pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100); + pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } } /* Never went stable */ return 0; }
+ +static u32 hpt374_read_freq(struct pci_dev *pdev) +{ + u32 freq; + unsigned long io_base = pci_resource_start(pdev, 4); + + if (PCI_FUNC(pdev->devfn) & 1) { + struct pci_dev *pdev_0; + + pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1); + /* Someone hot plugged the controller on us ? */ + if (pdev_0 == NULL) + return 0; + io_base = pci_resource_start(pdev_0, 4); + freq = inl(io_base + 0x90); + pci_dev_put(pdev_0); + } else + freq = inl(io_base + 0x90); + return freq; +} +
/** * hpt37x_init_one - Initialise an HPT37X/302 * @dev: PCI device
@@ -1013,121 +756,157 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev) static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* HPT370 - UDMA100 */ - static struct ata_port_info info_hpt370 = { - .sht = &hpt37x_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + static const struct ata_port_info info_hpt370 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &hpt370_port_ops };
/* HPT370A - UDMA100 */ - static struct ata_port_info info_hpt370a = { - .sht = &hpt37x_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + static const struct ata_port_info info_hpt370a = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &hpt370a_port_ops };
- /* HPT371, 372 and friends - UDMA133 */ - static struct ata_port_info info_hpt372 = { - .sht = &hpt37x_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, + /* HPT370 - UDMA66 */ + static const struct ata_port_info info_hpt370_33 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, + .port_ops = &hpt370_port_ops + };
+ /* HPT370A - UDMA66 */ + static const struct ata_port_info info_hpt370a_33 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, + .port_ops = &hpt370a_port_ops + };
+ /* HPT372 - UDMA133 */ + static const struct ata_port_info info_hpt372 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops =
&hpt372_port_ops }; - /* HPT371, 372 and friends - UDMA100 at 50MHz clock */ - static struct ata_port_info info_hpt372_50 = { - .sht = &hpt37x_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + /* HPT371, 302 - UDMA133 */ + static const struct ata_port_info info_hpt302 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &hpt302_port_ops + }; + /* HPT374 - UDMA100, function 1 uses different cable_detect method */ + static const struct ata_port_info info_hpt374_fn0 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &hpt372_port_ops }; - /* HPT374 - UDMA133 */ - static struct ata_port_info info_hpt374 = { - .sht = &hpt37x_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, - .port_ops = &hpt374_port_ops + static const struct ata_port_info info_hpt374_fn1 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + .port_ops = &hpt374_fn1_port_ops }; static const int MHz[4] = { 33, 40, 50, 66 }; - - struct ata_port_info *port_info[2]; - struct ata_port_info *port; - + void *private_data = NULL; + const struct ata_port_info *ppi[] = { NULL, NULL }; + u8 rev = dev->revision; u8 irqmask; - u32 class_rev; + u8 mcr1; u32 freq; + int prefer_dpll = 1; + + unsigned long iobase = pci_resource_start(dev, 4); const struct hpt_chip *chip_table; int clock_slot; + int rc; - pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xFF; + rc = pcim_enable_device(dev); + if (rc) + return rc; - if (dev->device == PCI_DEVICE_ID_TTI_HPT366) { + switch (dev->device) { + case PCI_DEVICE_ID_TTI_HPT366: /* May be a later chip in disguise. Check */ /* Older chips are in the HPT366 driver. Ignore them */ - if (class_rev < 3) + if (rev < 3) return -ENODEV; /* N series chips have their own driver. 
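(those are handled by the pata_hpt3x2n driver further down.)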
Ignore */ - if (class_rev == 6) + if (rev == 6) return -ENODEV; - switch(class_rev) { - case 3: - port = &info_hpt370; - chip_table = &hpt370; - break; - case 4: - port = &info_hpt370a; - chip_table = &hpt370a; - break; - case 5: - port = &info_hpt372; - chip_table = &hpt372; - break; - default: - printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev); - return -ENODEV; - } - } else { - switch(dev->device) { - case PCI_DEVICE_ID_TTI_HPT372: - /* 372N if rev >= 2*/ - if (class_rev >= 2) - return -ENODEV; - port = &info_hpt372; - chip_table = &hpt372a; - break; - case PCI_DEVICE_ID_TTI_HPT302: - /* 302N if rev > 1 */ - if (class_rev > 1) - return -ENODEV; - port = &info_hpt372; - /* Check this */ - chip_table = &hpt302; - break; - case PCI_DEVICE_ID_TTI_HPT371: - port = &info_hpt372; - chip_table = &hpt371; - break; - case PCI_DEVICE_ID_TTI_HPT374: - chip_table = &hpt374; - port = &info_hpt374; - break; - default: - printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device); - return -ENODEV; + switch (rev) { + case 3: + ppi[0] = &info_hpt370; + chip_table = &hpt370; + prefer_dpll = 0; + break; + case 4: + ppi[0] = &info_hpt370a; + chip_table = &hpt370a; + prefer_dpll = 0; + break; + case 5: + ppi[0] = &info_hpt372; + chip_table = &hpt372; + break; + default: + pr_err("Unknown HPT366 subtype, please report (%d)\n", + rev); + return -ENODEV; } + break; + case PCI_DEVICE_ID_TTI_HPT372: + /* 372N if rev >= 2 */ + if (rev >= 2) + return -ENODEV; + ppi[0] = &info_hpt372; + chip_table = &hpt372a; + break; + case PCI_DEVICE_ID_TTI_HPT302: + /* 302N if rev > 1 */ + if (rev > 1) + return -ENODEV; + ppi[0] = &info_hpt302; + /* Check this */ + chip_table = &hpt302; + break; + case PCI_DEVICE_ID_TTI_HPT371: + if (rev > 1) + return -ENODEV; + ppi[0] = &info_hpt302; + chip_table = &hpt371; + /* + * Single channel device, master is not present but the BIOS + * (or us for non x86) must mark it absent + */ + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + break; + case PCI_DEVICE_ID_TTI_HPT374: + chip_table = &hpt374; + if (!(PCI_FUNC(dev->devfn) & 1)) + *ppi = &info_hpt374_fn0; + else + *ppi = &info_hpt374_fn1; + break; + default: + pr_err("PCI table is bogus, please report (%d)\n", dev->device); + return -ENODEV; } /* Ok so this is a chip we support */ @@ -1149,18 +928,37 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, 0x5b, 0x23); - pci_read_config_dword(dev, 0x70, &freq); + /* + * HighPoint does this for HPT372A. + * NOTE: This register is only writeable via I/O space. + */ + if (chip_table == &hpt372a) + outb(0x0e, iobase + 0x9c); + + /* + * Some devices do not let this value be accessed via PCI space + * according to the old driver. In addition we must use the value + * from FN 0 on the HPT374. 
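+ * hpt374_read_freq() above does exactly that: for function 1 it looks
+ * up function 0 in the same slot via pci_get_slot() and reads the
+ * frequency counter from that function's BAR4 I/O window at +0x90.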
+ */ + + if (chip_table == &hpt374) { + freq = hpt374_read_freq(dev); + if (freq == 0) + return -ENODEV; + } else + freq = inl(iobase + 0x90); + if ((freq >> 12) != 0xABCDE) { int i; u8 sr; u32 total = 0; - printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n"); + pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ - for(i = 0; i < 128; i++) { + for (i = 0; i < 128; i++) { pci_read_config_byte(dev, 0x78, &sr); - total += sr; + total += sr & 0x1FF; udelay(15); } freq = total / 128; @@ -1173,85 +971,96 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) */ clock_slot = hpt37x_clock_slot(freq, chip_table->base); - if (chip_table->clocks[clock_slot] == NULL) { + if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) { /* * We need to try PLL mode instead + * + * For non UDMA133 capable devices we should + * use a 50MHz DPLL by choice */ - unsigned int f_low = (MHz[clock_slot] * chip_table->base) / 192; - unsigned int f_high = f_low + 2; - int adjust; + unsigned int f_low, f_high; + int dpll, adjust; + + /* Compute DPLL */ + dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2; + + f_low = (MHz[clock_slot] * 48) / MHz[dpll]; + f_high = f_low + 2; + if (clock_slot > 1) + f_high += 2; - for(adjust = 0; adjust < 8; adjust++) { + /* Select the DPLL clock. */ + pci_write_config_byte(dev, 0x5b, 0x21); + pci_write_config_dword(dev, 0x5C, + (f_high << 16) | f_low | 0x100); + + for (adjust = 0; adjust < 8; adjust++) { if (hpt37x_calibrate_dpll(dev)) break; - /* See if it'll settle at a fractionally different clock */ - if ((adjust & 3) == 3) { - f_low --; - f_high ++; - } - pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); + /* + * See if it'll settle at a fractionally + * different clock + */ + if (adjust & 1) + f_low -= adjust >> 1; + else + f_high += adjust >> 1; + pci_write_config_dword(dev, 0x5C, + (f_high << 16) | f_low | 0x100); } if (adjust == 8) { - printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n"); + pr_err("DPLL did not stabilize!\n"); return -ENODEV; } - /* Check if this works for all cases */ - port->private_data = (void *)hpt370_timings_66; + if (dpll == 3) + private_data = (void *)hpt37x_timings_66; + else + private_data = (void *)hpt37x_timings_50; - printk(KERN_INFO "hpt37x: Bus clock %dMHz, using DPLL.\n", MHz[clock_slot]); + pr_info("bus clock %dMHz, using %dMHz DPLL\n", + MHz[clock_slot], MHz[dpll]); } else { - port->private_data = (void *)chip_table->clocks[clock_slot]; + private_data = (void *)chip_table->clocks[clock_slot]; /* - * Perform a final fixup. The 371 and 372 clock determines - * if UDMA133 is available. + * Perform a final fixup. 
Note that we will have used the + * DPLL on the HPT372 which means we don't have to worry + * about lack of UDMA133 support on lower clocks */ - if (clock_slot == 2 && chip_table == &hpt372) { /* 50Mhz */ - printk(KERN_WARNING "pata_hpt37x: No UDMA133 support available with 50MHz bus clock.\n"); - if (port == &info_hpt372) - port = &info_hpt372_50; - else BUG(); - } - printk(KERN_INFO "hpt37x: %s: Bus clock %dMHz.\n", chip_table->name, MHz[clock_slot]); + if (clock_slot < 2 && ppi[0] == &info_hpt370) + ppi[0] = &info_hpt370_33; + if (clock_slot < 2 && ppi[0] == &info_hpt370a) + ppi[0] = &info_hpt370a_33; + + pr_info("%s using %dMHz bus clock\n", + chip_table->name, MHz[clock_slot]); } - port_info[0] = port_info[1] = port; + /* Now kick off ATA set up */ - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } -static struct pci_device_id hpt37x[] = { - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), }, - { 0, }, +static const struct pci_device_id hpt37x[] = { + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, + + { }, }; static struct pci_driver hpt37x_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = hpt37x, - .probe = hpt37x_init_one, + .probe = hpt37x_init_one, .remove = ata_pci_remove_one }; -static int __init hpt37x_init(void) -{ - return pci_register_driver(&hpt37x_pci_driver); -} - - -static void __exit hpt37x_exit(void) -{ - pci_unregister_driver(&hpt37x_pci_driver); -} - +module_pci_driver(hpt37x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt37x); MODULE_VERSION(DRV_VERSION); - -module_init(hpt37x_init); -module_exit(hpt37x_exit); diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 5c5d4f6ab90..b93c0f0729e 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -1,5 +1,5 @@ /* - * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers. + * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers. * * This driver is heavily based upon: * @@ -8,24 +8,25 @@ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc + * Portions Copyright (C) 2005-2010 MontaVista Software, Inc. * * * TODO - * 371N * Work out best PLL policy */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt3x2n" -#define DRV_VERSION "0.3" +#define DRV_VERSION "0.3.15" enum { HPT_PCI_FAST = (1 << 31), @@ -45,25 +46,24 @@ struct hpt_chip { /* key for bus clock timings * bit - * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW - * DMA. cycles = value + 1 - * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW - * DMA. 
cycles = value + 1 - * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file + * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA. + * cycles = value + 1 + * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file * register access. - * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file + * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file * register access. - * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer. - * during task file register access. - * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA - * xfer. - * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task + * 18:20 udma_cycle_time. Clock cycles for UDMA xfer. + * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock. + * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer. + * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file * register access. - * 28 UDMA enable - * 29 DMA enable - * 30 PIO_MST enable. if set, the chip is in bus master mode during - * PIO. - * 31 FIFO enable. + * 28 UDMA enable. + * 29 DMA enable. + * 30 PIO_MST enable. If set, the chip is in bus master mode during + * PIO xfer. + * 31 FIFO enable. Only for PIO. */ /* 66MHz DPLL clocks */ @@ -80,14 +80,13 @@ static struct hpt_clock hpt3x2n_clocks[] = { { XFER_MW_DMA_2, 0x2c829c62 }, { XFER_MW_DMA_1, 0x2c829c66 }, - { XFER_MW_DMA_0, 0x2c829d2c }, + { XFER_MW_DMA_0, 0x2c829d2e }, { XFER_PIO_4, 0x0c829c62 }, { XFER_PIO_3, 0x0c829c84 }, { XFER_PIO_2, 0x0c829ca6 }, { XFER_PIO_1, 0x0d029d26 }, { XFER_PIO_0, 0x0d029d5e }, - { 0, 0x0d029d5e } }; /** @@ -105,7 +104,7 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) { struct hpt_clock *clocks = hpt3x2n_clocks; - while(clocks->xfer_speed) { + while (clocks->xfer_speed) { if (clocks->xfer_speed == speed) return clocks->timing; clocks++; @@ -115,64 +114,76 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) } /** - * hpt3x2n_pre_reset - reset the hpt3x2n bus - * @ap: ATA port to reset + * hpt372n_filter - mode selection filter + * @adev: ATA device + * @mask: mode mask * - * Perform the initial reset handling for the 3x2n series controllers. - * Reset the hardware and state machine, obtain the cable type. + * The Marvell bridge chips used on the HighPoint SATA cards do not seem + * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
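+ * For a SATA device the filter below therefore clears UDMA1-UDMA3
+ * ((0xE << ATA_SHIFT_UDMA) covers bits 1-3 of the UDMA field) together
+ * with the whole MWDMA range (ATA_MASK_MWDMA).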
*/ +static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask) +{ + if (ata_id_is_sata(adev->id)) + mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); -static int hpt3xn_pre_reset(struct ata_port *ap) + return mask; +} + +/** + * hpt3x2n_cable_detect - Detect the cable type + * @ap: ATA port to detect on + * + * Return the cable type attached to this port + */ + +static int hpt3x2n_cable_detect(struct ata_port *ap) { u8 scr2, ata66; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_byte(pdev, 0x5B, &scr2); pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); + + udelay(10); /* debounce */ + /* Cable register now active */ pci_read_config_byte(pdev, 0x5A, &ata66); /* Restore state */ pci_write_config_byte(pdev, 0x5B, scr2); - if (ata66 & (1 << ap->port_no)) - ap->cbl = ATA_CBL_PATA40; + if (ata66 & (2 >> ap->port_no)) + return ATA_CBL_PATA40; else - ap->cbl = ATA_CBL_PATA80; - - /* Reset the state machine */ - pci_write_config_byte(pdev, 0x50, 0x37); - pci_write_config_byte(pdev, 0x54, 0x37); - udelay(100); - - return ata_std_prereset(ap); + return ATA_CBL_PATA80; } /** - * hpt3x2n_error_handler - probe the hpt3x2n bus - * @ap: ATA port to reset + * hpt3x2n_pre_reset - reset the hpt3x2n bus + * @link: ATA link to reset + * @deadline: deadline jiffies for the operation * - * Perform the probe reset handling for the 3x2N + * Perform the initial reset handling for the 3x2n series controllers. + * Reset the hardware and state machine, */ -static void hpt3x2n_error_handler(struct ata_port *ap) +static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline) { - ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); -/** - * hpt3x2n_set_piomode - PIO setup - * @ap: ATA interface - * @adev: device on the interface - * - * Perform PIO mode setup. - */ + /* Reset the state machine */ + pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); + udelay(100); -static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) + return ata_sff_prereset(link, deadline); +} + +static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev, + u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 addr1, addr2; - u32 reg; - u32 mode; + u32 reg, timing, mask; u8 fast; addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); @@ -183,12 +194,32 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) fast &= ~0x07; pci_write_config_byte(pdev, addr2, fast); + /* Determine timing mask and find matching mode entry */ + if (mode < XFER_MW_DMA_0) + mask = 0xcfc3ffff; + else if (mode < XFER_UDMA_0) + mask = 0x31c001ff; + else + mask = 0x303c0000; + + timing = hpt3x2n_find_mode(ap, mode); + pci_read_config_dword(pdev, addr1, ®); - mode = hpt3x2n_find_mode(ap, adev->pio_mode); - mode &= ~0x8000000; /* No FIFO in PIO */ - mode &= ~0x30070000; /* Leave config bits alone */ - reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + reg = (reg & ~mask) | (timing & mask); + pci_write_config_dword(pdev, addr1, reg); +} + +/** + * hpt3x2n_set_piomode - PIO setup + * @ap: ATA interface + * @adev: device on the interface + * + * Perform PIO mode setup. 
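+ *
+ * Per the clock timing key above, the PIO mask (0xcfc3ffff) used by
+ * hpt3x2n_set_mode() touches the timing fields (bits 0:17 and 22:27)
+ * plus PIO_MST and FIFO (bits 30:31), and leaves the UDMA and DMA
+ * enable bits alone.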
+ */ + +static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + hpt3x2n_set_mode(ap, adev, adev->pio_mode); }
/** @@ -196,32 +227,12 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) * @ap: ATA interface * @adev: Device being configured * - * Set up the channel for MWDMA or UDMA modes. Much the same as with - * PIO, load the mode number and then set MWDMA or UDMA flag. + * Set up the channel for MWDMA or UDMA modes. */
static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - fast &= ~0x07; - pci_write_config_byte(pdev, addr2, fast); - - pci_read_config_dword(pdev, addr1, &reg); - mode = hpt3x2n_find_mode(ap, adev->dma_mode); - mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ - mode &= ~0xC0000000; /* Leave config bits alone */ - reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt3x2n_set_mode(ap, adev, adev->dma_mode); }
/** @@ -263,124 +274,102 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) static void hpt3x2n_set_clock(struct ata_port *ap, int source) { - unsigned long bmdma = ap->ioaddr.bmdma_addr; + void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
/* Tristate the bus */ - outb(0x80, bmdma+0x73); - outb(0x80, bmdma+0x77); + iowrite8(0x80, bmdma+0x73); + iowrite8(0x80, bmdma+0x77);
/* Switch clock and reset channels */ - outb(source, bmdma+0x7B); - outb(0xC0, bmdma+0x79); + iowrite8(source, bmdma+0x7B); + iowrite8(0xC0, bmdma+0x79);
- /* Reset state machines */ - outb(0x37, bmdma+0x70); - outb(0x37, bmdma+0x74); + /* Reset state machines, avoid enabling the disabled channels */ + iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70); + iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
/* Complete reset */ - outb(0x00, bmdma+0x79); + iowrite8(0x00, bmdma+0x79);
/* Reconnect channels to bus */ - outb(0x00, bmdma+0x73); - outb(0x00, bmdma+0x77); -} - -/* Check if our partner interface is busy */ - -static int hpt3x2n_pair_idle(struct ata_port *ap) -{ - struct ata_host *host = ap->host; - struct ata_port *pair = host->ports[ap->port_no ^ 1]; - - if (pair->hsm_task_state == HSM_ST_IDLE) - return 1; - return 0; + iowrite8(0x00, bmdma+0x73); + iowrite8(0x00, bmdma+0x77); }
-static int hpt3x2n_use_dpll(struct ata_port *ap, int reading) +static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) { long flags = (long)ap->host->private_data; + /* See if we should use the DPLL */ - if (reading == 0) + if (writing) return USE_DPLL; /* Needed for write */ if (flags & PCI66) return USE_DPLL; /* Needed at 66Mhz */ return 0; }
-static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc) +static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; + int rc, flags = (long)ap->host->private_data; + int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); + + /* First apply the usual rules */ + rc = ata_std_qc_defer(qc); + if (rc != 0) + return rc; + + if ((flags & USE_DPLL) != dpll && alt->qc_active) + return ATA_DEFER_PORT; + return 0; +} +
+static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) { - struct ata_taskfile *tf = &qc->tf;
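+	/*
+	 * The currently selected clock source is cached in the low bits
+	 * of host->private_data; 0x21 switches the channel logic to the
+	 * 66MHz DPLL and 0x23 back to the PCI clock, the same value the
+	 * init code writes to the 0x5B clock select.
+	 */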
struct ata_port *ap = qc->ap; int flags = (long)ap->host->private_data; + int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
- if (hpt3x2n_pair_idle(ap)) { - int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE)); - if ((flags & USE_DPLL) != dpll) { - if (dpll == 1) - hpt3x2n_set_clock(ap, 0x21); - else - hpt3x2n_set_clock(ap, 0x23); - } + if ((flags & USE_DPLL) != dpll) { + flags &= ~USE_DPLL; + flags |= dpll; + ap->host->private_data = (void *)(long)flags; + + hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); }
- return ata_qc_issue_prot(qc); + return ata_bmdma_qc_issue(qc); }
static struct scsi_host_template hpt3x2n_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), };
/* - * Configuration for HPT3x2n. + * Configuration for HPT302N/371N. */
-static struct ata_port_operations hpt3x2n_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt3x2n_set_piomode, - .set_dmamode = hpt3x2n_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt3x2n_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, +static struct ata_port_operations hpt3xxn_port_ops = { + .inherits = &ata_bmdma_port_ops,
- .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, .bmdma_stop = hpt3x2n_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = hpt3x2n_qc_issue_prot, + .qc_defer = hpt3x2n_qc_defer, + .qc_issue = hpt3x2n_qc_issue,
- .data_xfer = ata_pio_data_xfer, + .cable_detect = hpt3x2n_cable_detect, + .set_piomode = hpt3x2n_set_piomode, + .set_dmamode = hpt3x2n_set_dmamode, + .prereset = hpt3x2n_pre_reset, +};
- .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +/* + * Configuration for HPT372N. Same as 302N/371N but we have a mode filter. + */
- .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct ata_port_operations hpt372n_port_ops = { + .inherits = &hpt3xxn_port_ops, + .mode_filter = &hpt372n_filter, };
/** @@ -397,12 +386,12 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev) u32 reg5c; int tries; - for(tries = 0; tries < 0x5000; tries++) { + for (tries = 0; tries < 0x5000; tries++) { udelay(50); pci_read_config_byte(dev, 0x5b, &reg5b); if (reg5b & 0x80) { /* See if it stays set */ - for(tries = 0; tries < 0x1000; tries ++) { + for (tries = 0; tries < 0x1000; tries++) { pci_read_config_byte(dev, 0x5b, &reg5b); /* Failed ?
*/ if ((reg5b & 0x80) == 0)
@@ -410,7 +399,7 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev) } /* Turn off tuning, we have the DPLL set */ pci_read_config_dword(dev, 0x5c, &reg5c); - pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100); + pci_write_config_dword(dev, 0x5c, reg5c & ~0x100); return 1; } }
@@ -422,11 +411,23 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev) { unsigned long freq; u32 fcnt; + unsigned long iobase = pci_resource_start(pdev, 4);
- pci_read_config_dword(pdev, 0x70/*CHECKME*/, &fcnt); + fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */ if ((fcnt >> 12) != 0xABCDE) { - printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n"); - return 33; /* Not BIOS set */ + int i; + u16 sr; + u32 total = 0; + + pr_warn("BIOS clock data not set\n"); + + /* This is the process the HPT371 BIOS is reported to use */ + for (i = 0; i < 128; i++) { + pci_read_config_word(pdev, 0x78, &sr); + total += sr & 0x1FF; + udelay(15); + } + fcnt = total / 128; }
fcnt &= 0x1FF;
@@ -468,53 +469,68 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev) * HPT372N 9 (HPT372N) * UDMA133 * * (1) UDMA133 support depends on the bus clock - * - * To pin down HPT371N */
static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - /* HPT372N and friends - UDMA133 */ - static struct ata_port_info info = { - .sht = &hpt3x2n_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, - .port_ops = &hpt3x2n_port_ops + /* HPT372N - UDMA133 */ + static const struct ata_port_info info_hpt372n = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &hpt372n_port_ops };
- struct ata_port_info *port_info[2]; - struct ata_port_info *port = &info; - + /* HPT302N and HPT371N - UDMA133 */ + static const struct ata_port_info info_hpt3xxn = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &hpt3xxn_port_ops + };
+ const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL }; + u8 rev = dev->revision; u8 irqmask; - u32 class_rev; - unsigned int pci_mhz; unsigned int f_low, f_high; int adjust; - - pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xFF; - - switch(dev->device) { - case PCI_DEVICE_ID_TTI_HPT366: - if (class_rev < 6) - return -ENODEV; - break; - case PCI_DEVICE_ID_TTI_HPT372: - /* 372N if rev >= 1*/ - if (class_rev == 0) - return -ENODEV; - break; - case PCI_DEVICE_ID_TTI_HPT302: - if (class_rev < 2) - return -ENODEV; - break; - case PCI_DEVICE_ID_TTI_HPT372N: - break; - default: - printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device); + unsigned long iobase = pci_resource_start(dev, 4); + void *hpriv = (void *)USE_DPLL; + int rc; + + rc = pcim_enable_device(dev); + if (rc) + return rc; +
+ switch (dev->device) { + case PCI_DEVICE_ID_TTI_HPT366: + /* 372N if rev >= 6 */ + if (rev < 6) + return -ENODEV; + goto hpt372n; + case PCI_DEVICE_ID_TTI_HPT371: + /* 371N if rev >= 2 */ + if (rev < 2) + return -ENODEV; + break; + case PCI_DEVICE_ID_TTI_HPT372: + /* 372N if rev >= 2 */ + if (rev < 2) + return -ENODEV; + goto hpt372n; + case PCI_DEVICE_ID_TTI_HPT302: + /* 302N if rev >= 2 */ + if (rev < 2) return -ENODEV; + break; + case PCI_DEVICE_ID_TTI_HPT372N: +hpt372n: + ppi[0] = &info_hpt372n; + break; + default: + pr_err("PCI table is bogus, please report (%d)\n", dev->device); + return -ENODEV; }
/* Ok so this is a
chip we support */ @@ -528,8 +544,23 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); - /* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or - 50 for UDMA100. Right now we always use 66 */ + /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + + /* + * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or + * 50 for UDMA100. Right now we always use 66 + */ pci_mhz = hpt3x2n_pci_clock(dev); @@ -541,57 +572,58 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, 0x5B, 0x21); /* Unlike the 37x we don't try jiggling the frequency */ - for(adjust = 0; adjust < 8; adjust++) { + for (adjust = 0; adjust < 8; adjust++) { if (hpt3xn_calibrate_dpll(dev)) break; pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); } - if (adjust == 8) - printk(KERN_WARNING "hpt3xn: DPLL did not stabilize.\n"); + if (adjust == 8) { + pr_err("DPLL did not stabilize!\n"); + return -ENODEV; + } + + pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz); - /* Set our private data up. We only need a few flags so we use - it directly */ - port->private_data = NULL; + /* + * Set our private data up. We only need a few flags + * so we use it directly. + */ if (pci_mhz > 60) - port->private_data = (void *)PCI66; + hpriv = (void *)(PCI66 | USE_DPLL); + + /* + * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in + * the MISC. register to stretch the UltraDMA Tss timing. + * NOTE: This register is only writeable via I/O space. 
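+ * (Like the HPT372A fixup in pata_hpt37x, this is a BAR4 I/O access:
+ * the line below reads iobase + 0x9c, ORs in bit 2 (0x04) and writes
+ * the value back.)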
+ */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) + outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ - port_info[0] = port_info[1] = port; - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } -static struct pci_device_id hpt3x2n[] = { - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N), }, - { 0, }, +static const struct pci_device_id hpt3x2n[] = { + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), }, + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), }, + + { }, }; static struct pci_driver hpt3x2n_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = hpt3x2n, - .probe = hpt3x2n_init_one, + .probe = hpt3x2n_init_one, .remove = ata_pci_remove_one }; -static int __init hpt3x2n_init(void) -{ - return pci_register_driver(&hpt3x2n_pci_driver); -} - - -static void __exit hpt3x2n_exit(void) -{ - pci_unregister_driver(&hpt3x2n_pci_driver); -} - +module_pci_driver(hpt3x2n_pci_driver); MODULE_AUTHOR("Alan Cox"); -MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x"); +MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt3x2n); MODULE_VERSION(DRV_VERSION); - -module_init(hpt3x2n_init); -module_exit(hpt3x2n_exit); diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index 1f084ab1ccc..d019cdd5bc9 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -16,33 +16,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_hpt3x3" -#define DRV_VERSION "0.4.1" - -static int hpt3x3_probe_init(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * hpt3x3_probe_reset - reset the hpt3x3 bus - * @ap: ATA port to reset - * - * Perform the housekeeping when doing an ATA bus reeset. We just - * need to force the cable type. - */ - -static void hpt3x3_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, hpt3x3_probe_init, ata_std_softreset, NULL, ata_std_postreset); -} +#define DRV_VERSION "0.6.1" /** * hpt3x3_set_piomode - PIO setup @@ -71,6 +51,7 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev) pci_write_config_dword(pdev, 0x48, r2); } +#if defined(CONFIG_PATA_HPT3X3_DMA) /** * hpt3x3_set_dmamode - DMA timing setup * @ap: ATA interface @@ -78,6 +59,9 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev) * * Set up the channel for MWDMA or UDMA modes. Much the same as with * PIO, load the mode number and then set MWDMA or UDMA flag. 
+ * + * 0x44 : bit 0-2 master mode, 3-5 slave mode, etc + * 0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc */ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) @@ -95,90 +79,90 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ if (adev->dma_mode >= XFER_UDMA_0) - r2 |= 0x01 << dn; /* Ultra mode */ + r2 |= (0x01 << dn); /* Ultra mode */ else - r2 |= 0x10 << dn; /* MWDMA */ + r2 |= (0x10 << dn); /* MWDMA */ pci_write_config_dword(pdev, 0x44, r1); pci_write_config_dword(pdev, 0x48, r2); } -static struct scsi_host_template hpt3x3_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, -}; +/** + * hpt3x3_freeze - DMA workaround + * @ap: port to freeze + * + * When freezing an HPT3x3 we must stop any pending DMA before + * writing to the control register or the chip will hang + */ -static struct ata_port_operations hpt3x3_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = hpt3x3_set_piomode, - .set_dmamode = hpt3x3_set_dmamode, - .mode_filter = ata_pci_default_filter, +static void hpt3x3_freeze(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ ATA_DMA_START, + mmio + ATA_DMA_CMD); + ata_sff_dma_pause(ap); + ata_sff_freeze(ap); +} - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = hpt3x3_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, +/** + * hpt3x3_bmdma_setup - DMA workaround + * @qc: Queued command + * + * When issuing BMDMA we must clean up the error/active bits in + * software on this device + */ - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, +static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + r |= ATA_DMA_INTR | ATA_DMA_ERR; + iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + return ata_bmdma_setup(qc); +} - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +/** + * hpt3x3_atapi_dma - ATAPI DMA check + * @qc: Queued command + * + * Just say no - we don't do ATAPI DMA + */ + +static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) +{ + return 1; +} - .data_xfer = ata_pio_data_xfer, +#endif /* CONFIG_PATA_HPT3X3_DMA */ + +static struct scsi_host_template hpt3x3_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +static struct ata_port_operations hpt3x3_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = hpt3x3_set_piomode, +#if defined(CONFIG_PATA_HPT3X3_DMA) + .set_dmamode = hpt3x3_set_dmamode, + .bmdma_setup = hpt3x3_bmdma_setup, + .check_atapi_dma= hpt3x3_atapi_dma, + .freeze = hpt3x3_freeze, +#endif - .port_start = ata_port_start, - .port_stop = ata_port_stop, - 
.host_stop = ata_host_stop }; /** - * hpt3x3_init_one - Initialise an HPT343/363 + * hpt3x3_init_chipset - chip setup * @dev: PCI device - * @id: Entry in match table * - * Perform basic initialisation. The chip has a quirk that it won't - * function unless it is at XX00. The old ATA driver touched this up - * but we leave it for pci quirks to do properly. + * Perform the setup required at boot and on resume. */ -static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static void hpt3x3_init_chipset(struct pci_dev *dev) { - static struct ata_port_info info = { - .sht = &hpt3x3_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, - .port_ops = &hpt3x3_port_ops - }; - static struct ata_port_info *port_info[2] = { &info, &info }; u16 cmd; - /* Initialize the board */ pci_write_config_word(dev, 0x80, 0x00); /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */ @@ -187,40 +171,122 @@ static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0); else pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20); +} + +/** + * hpt3x3_init_one - Initialise an HPT343/363 + * @pdev: PCI device + * @id: Entry in match table + * + * Perform basic initialisation. We set the device up so we access all + * ports via BAR4. This is necessary to work around errata. + */ + +static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, +#if defined(CONFIG_PATA_HPT3X3_DMA) + /* Further debug needed */ + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, +#endif + .port_ops = &hpt3x3_port_ops + }; + /* Register offsets of taskfiles in BAR4 area */ + static const u8 offset_cmd[2] = { 0x20, 0x28 }; + static const u8 offset_ctl[2] = { 0x36, 0x3E }; + const struct ata_port_info *ppi[] = { &info, NULL }; + struct ata_host *host; + int i, rc; + void __iomem *base; + + hpt3x3_init_chipset(pdev); + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) + return -ENOMEM; + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + /* Everything is relative to BAR4 if we set up this way */ + rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + + base = host->iomap[4]; /* Bus mastering base */ + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct ata_ioports *ioaddr = &ap->ioaddr; + + ioaddr->cmd_addr = base + offset_cmd[i]; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = base + offset_ctl[i]; + ioaddr->scr_addr = NULL; + ata_sff_std_ports(ioaddr); + ioaddr->bmdma_addr = base + 8 * i; + + ata_port_pbar_desc(ap, 4, -1, "ioport"); + ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); + } + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &hpt3x3_sht); +} - /* Now kick off ATA set up */ - return ata_pci_init_one(dev, port_info, 2); +#ifdef CONFIG_PM_SLEEP +static int hpt3x3_reinit_one(struct pci_dev *dev) +{ + struct ata_host *host = pci_get_drvdata(dev); + int rc; + + rc = ata_pci_device_do_resume(dev); + if (rc) + 
return rc; + + hpt3x3_init_chipset(dev); + + ata_host_resume(host); + return 0; } +#endif + +static const struct pci_device_id hpt3x3[] = { + { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), }, -static struct pci_device_id hpt3x3[] = { - { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT343), }, - { 0, }, + { }, }; static struct pci_driver hpt3x3_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = hpt3x3, .probe = hpt3x3_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = hpt3x3_reinit_one, +#endif }; -static int __init hpt3x3_init(void) -{ - return pci_register_driver(&hpt3x3_pci_driver); -} - - -static void __exit hpt3x3_exit(void) -{ - pci_unregister_driver(&hpt3x3_pci_driver); -} - +module_pci_driver(hpt3x3_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, hpt3x3); MODULE_VERSION(DRV_VERSION); - -module_init(hpt3x3_init); -module_exit(hpt3x3_exit); diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c new file mode 100644 index 00000000000..d7c732042a4 --- /dev/null +++ b/drivers/ata/pata_icside.c @@ -0,0 +1,627 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/gfp.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/libata.h> + +#include <asm/dma.h> +#include <asm/ecard.h> + +#define DRV_NAME "pata_icside" + +#define ICS_IDENT_OFFSET 0x2280 + +#define ICS_ARCIN_V5_INTRSTAT 0x0000 +#define ICS_ARCIN_V5_INTROFFSET 0x0004 + +#define ICS_ARCIN_V6_INTROFFSET_1 0x2200 +#define ICS_ARCIN_V6_INTRSTAT_1 0x2290 +#define ICS_ARCIN_V6_INTROFFSET_2 0x3200 +#define ICS_ARCIN_V6_INTRSTAT_2 0x3290 + +struct portinfo { + unsigned int dataoffset; + unsigned int ctrloffset; + unsigned int stepping; +}; + +static const struct portinfo pata_icside_portinfo_v5 = { + .dataoffset = 0x2800, + .ctrloffset = 0x2b80, + .stepping = 6, +}; + +static const struct portinfo pata_icside_portinfo_v6_1 = { + .dataoffset = 0x2000, + .ctrloffset = 0x2380, + .stepping = 6, +}; + +static const struct portinfo pata_icside_portinfo_v6_2 = { + .dataoffset = 0x3000, + .ctrloffset = 0x3380, + .stepping = 6, +}; + +struct pata_icside_state { + void __iomem *irq_port; + void __iomem *ioc_base; + unsigned int type; + unsigned int dma; + struct { + u8 port_sel; + u8 disabled; + unsigned int speed[ATA_MAX_DEVICES]; + } port[2]; +}; + +struct pata_icside_info { + struct pata_icside_state *state; + struct expansion_card *ec; + void __iomem *base; + void __iomem *irqaddr; + unsigned int irqmask; + const expansioncard_ops_t *irqops; + unsigned int mwdma_mask; + unsigned int nr_ports; + const struct portinfo *port[2]; + unsigned long raw_base; + unsigned long raw_ioc_base; +}; + +#define ICS_TYPE_A3IN 0 +#define ICS_TYPE_A3USER 1 +#define ICS_TYPE_V6 3 +#define ICS_TYPE_V5 15 +#define ICS_TYPE_NOTYPE ((unsigned int)-1) + +/* ---------------- Version 5 PCB Support Functions --------------------- */ +/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) + * Purpose : enable interrupts from card + */ +static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) +{ + struct pata_icside_state *state = ec->irq_data; + + writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET); +} + +/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) + * Purpose : 
disable interrupts from card + */ +static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) +{ + struct pata_icside_state *state = ec->irq_data; + + readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET); +} + +static const expansioncard_ops_t pata_icside_ops_arcin_v5 = { + .irqenable = pata_icside_irqenable_arcin_v5, + .irqdisable = pata_icside_irqdisable_arcin_v5, +}; + + +/* ---------------- Version 6 PCB Support Functions --------------------- */ +/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) + * Purpose : enable interrupts from card + */ +static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) +{ + struct pata_icside_state *state = ec->irq_data; + void __iomem *base = state->irq_port; + + if (!state->port[0].disabled) + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); + if (!state->port[1].disabled) + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); +} + +/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) + * Purpose : disable interrupts from card + */ +static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) +{ + struct pata_icside_state *state = ec->irq_data; + + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); +} + +/* Prototype: pata_icside_irqprobe(struct expansion_card *ec) + * Purpose : detect an active interrupt from card + */ +static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec) +{ + struct pata_icside_state *state = ec->irq_data; + + return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 || + readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1; +} + +static const expansioncard_ops_t pata_icside_ops_arcin_v6 = { + .irqenable = pata_icside_irqenable_arcin_v6, + .irqdisable = pata_icside_irqdisable_arcin_v6, + .irqpending = pata_icside_irqpending_arcin_v6, +}; + + +/* + * SG-DMA support. + * + * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers. + * There is only one DMA controller per card, which means that only + * one drive can be accessed at one time. NOTE! We do not enforce that + * here, but we rely on the main IDE driver spotting that both + * interfaces use the same IRQ, which should guarantee this. + */ + +/* + * Configure the IOMD to give the appropriate timings for the transfer + * mode being requested. We take the advice of the ATA standards, and + * calculate the cycle time based on the transfer mode, and the EIDE + * MW DMA specs that the drive provides in the IDENTIFY command. 
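+ *
+ * ata_timing_compute() below merges the mode's base timings with the
+ * drive's IDENTIFY data; only the resulting active, recover and cycle
+ * times are consulted when picking an IOMD type.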
+ * + * We have the following IOMD DMA modes to choose from: + * + * Type Active Recovery Cycle + * A 250 (250) 312 (550) 562 (800) + * B 187 (200) 250 (550) 437 (750) + * C 125 (125) 125 (375) 250 (500) + * D 62 (50) 125 (375) 187 (425) + * + * (figures in brackets are actual measured timings on DIOR/DIOW) + * + * However, we also need to take care of the read/write active and + * recovery timings: + * + * Read Write + * Mode Active -- Recovery -- Cycle IOMD type + * MW0 215 50 215 480 A + * MW1 80 50 50 150 C + * MW2 70 25 25 120 C + */ +static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + struct pata_icside_state *state = ap->host->private_data; + struct ata_timing t; + unsigned int cycle; + char iomd_type; + + /* + * DMA is based on a 16MHz clock + */ + if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1)) + return; + + /* + * Choose the IOMD cycle timing which ensure that the interface + * satisfies the measured active, recovery and cycle times. + */ + if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) + iomd_type = 'D', cycle = 187; + else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) + iomd_type = 'C', cycle = 250; + else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) + iomd_type = 'B', cycle = 437; + else + iomd_type = 'A', cycle = 562; + + ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n", + t.active, t.recover, t.cycle, iomd_type); + + state->port[ap->port_no].speed[adev->devno] = cycle; +} + +static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_icside_state *state = ap->host->private_data; + unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE; + + /* + * We are simplex; BUG if we try to fiddle with DMA + * while it's active. + */ + BUG_ON(dma_channel_active(state->dma)); + + /* + * Route the DMA signals to the correct interface + */ + writeb(state->port[ap->port_no].port_sel, state->ioc_base); + + set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]); + set_dma_sg(state->dma, qc->sg, qc->n_elem); + set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +static void pata_icside_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_icside_state *state = ap->host->private_data; + + BUG_ON(dma_channel_active(state->dma)); + enable_dma(state->dma); +} + +static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_icside_state *state = ap->host->private_data; + + disable_dma(state->dma); + + /* see ata_bmdma_stop */ + ata_sff_dma_pause(ap); +} + +static u8 pata_icside_bmdma_status(struct ata_port *ap) +{ + struct pata_icside_state *state = ap->host->private_data; + void __iomem *irq_port; + + irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 : + ICS_ARCIN_V6_INTRSTAT_1); + + return readb(irq_port) & 1 ? 
ATA_DMA_INTR : 0; +} + +static int icside_dma_init(struct pata_icside_info *info) +{ + struct pata_icside_state *state = info->state; + struct expansion_card *ec = info->ec; + int i; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + state->port[0].speed[i] = 480; + state->port[1].speed[i] = 480; + } + + if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { + state->dma = ec->dma; + info->mwdma_mask = ATA_MWDMA2; + } + + return 0; +} + + +static struct scsi_host_template pata_icside_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .dma_boundary = IOMD_DMA_BOUNDARY, +}; + +static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) +{ + struct ata_port *ap = link->ap; + struct pata_icside_state *state = ap->host->private_data; + + if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) + return ata_sff_postreset(link, classes); + + state->port[ap->port_no].disabled = 1; + + if (state->type == ICS_TYPE_V6) { + /* + * Disable interrupts from this port, otherwise we + * receive spurious interrupts from the floating + * interrupt line. + */ + void __iomem *irq_port = state->irq_port + + (ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1); + readb(irq_port); + } +} + +static struct ata_port_operations pata_icside_port_ops = { + .inherits = &ata_bmdma_port_ops, + /* no need to build any PRD tables for DMA */ + .qc_prep = ata_noop_qc_prep, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .bmdma_setup = pata_icside_bmdma_setup, + .bmdma_start = pata_icside_bmdma_start, + .bmdma_stop = pata_icside_bmdma_stop, + .bmdma_status = pata_icside_bmdma_status, + + .cable_detect = ata_cable_40wire, + .set_dmamode = pata_icside_set_dmamode, + .postreset = pata_icside_postreset, + + .port_start = ATA_OP_NULL, /* don't need PRD table */ +}; + +static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, + struct pata_icside_info *info, + const struct portinfo *port) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + void __iomem *cmd = base + port->dataoffset; + + ioaddr->cmd_addr = cmd; + ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping); + ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping); + ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping); + ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping); + ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping); + ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping); + ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping); + ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping); + ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping); + ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping); + + ioaddr->ctl_addr = base + port->ctrloffset; + ioaddr->altstatus_addr = ioaddr->ctl_addr; + + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", + info->raw_base + port->dataoffset, + info->raw_base + port->ctrloffset); + + if (info->raw_ioc_base) + ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); +} + +static int pata_icside_register_v5(struct pata_icside_info *info) +{ + struct pata_icside_state *state = info->state; + void __iomem *base; + + base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0); + if (!base) + return -ENOMEM; + + state->irq_port = base; + + info->base = base; + info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT; + info->irqmask = 1; + info->irqops = &pata_icside_ops_arcin_v5; + info->nr_ports = 1; + info->port[0] = &pata_icside_portinfo_v5; + + info->raw_base = ecard_resource_start(info->ec, 
ECARD_RES_MEMC); + + return 0; +} + +static int pata_icside_register_v6(struct pata_icside_info *info) +{ + struct pata_icside_state *state = info->state; + struct expansion_card *ec = info->ec; + void __iomem *ioc_base, *easi_base; + unsigned int sel = 0; + + ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!ioc_base) + return -ENOMEM; + + easi_base = ioc_base; + + if (ecard_resource_flags(ec, ECARD_RES_EASI)) { + easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0); + if (!easi_base) + return -ENOMEM; + + /* + * Enable access to the EASI region. + */ + sel = 1 << 5; + } + + writeb(sel, ioc_base); + + state->irq_port = easi_base; + state->ioc_base = ioc_base; + state->port[0].port_sel = sel; + state->port[1].port_sel = sel | 1; + + info->base = easi_base; + info->irqops = &pata_icside_ops_arcin_v6; + info->nr_ports = 2; + info->port[0] = &pata_icside_portinfo_v6_1; + info->port[1] = &pata_icside_portinfo_v6_2; + + info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI); + info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST); + + return icside_dma_init(info); +} + +static int pata_icside_add_ports(struct pata_icside_info *info) +{ + struct expansion_card *ec = info->ec; + struct ata_host *host; + int i; + + if (info->irqaddr) { + ec->irqaddr = info->irqaddr; + ec->irqmask = info->irqmask; + } + if (info->irqops) + ecard_setirq(ec, info->irqops, info->state); + + /* + * Be on the safe side - disable interrupts + */ + ec->ops->irqdisable(ec, ec->irq); + + host = ata_host_alloc(&ec->dev, info->nr_ports); + if (!host) + return -ENOMEM; + + host->private_data = info->state; + host->flags = ATA_HOST_SIMPLEX; + + for (i = 0; i < info->nr_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ap->pio_mask = ATA_PIO4; + ap->mwdma_mask = info->mwdma_mask; + ap->flags |= ATA_FLAG_SLAVE_POSS; + ap->ops = &pata_icside_port_ops; + + pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); + } + + return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0, + &pata_icside_sht); +} + +static int pata_icside_probe(struct expansion_card *ec, + const struct ecard_id *id) +{ + struct pata_icside_state *state; + struct pata_icside_info info; + void __iomem *idmem; + int ret; + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL); + if (!state) { + ret = -ENOMEM; + goto release; + } + + state->type = ICS_TYPE_NOTYPE; + state->dma = NO_DMA; + + idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (idmem) { + unsigned int type; + + type = readb(idmem + ICS_IDENT_OFFSET) & 1; + type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1; + type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2; + type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3; + ecardm_iounmap(ec, idmem); + + state->type = type; + } + + memset(&info, 0, sizeof(info)); + info.state = state; + info.ec = ec; + + switch (state->type) { + case ICS_TYPE_A3IN: + dev_warn(&ec->dev, "A3IN unsupported\n"); + ret = -ENODEV; + break; + + case ICS_TYPE_A3USER: + dev_warn(&ec->dev, "A3USER unsupported\n"); + ret = -ENODEV; + break; + + case ICS_TYPE_V5: + ret = pata_icside_register_v5(&info); + break; + + case ICS_TYPE_V6: + ret = pata_icside_register_v6(&info); + break; + + default: + dev_warn(&ec->dev, "unknown interface type\n"); + ret = -ENODEV; + break; + } + + if (ret == 0) + ret = pata_icside_add_ports(&info); + + if (ret == 0) + goto out; + + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void 
pata_icside_shutdown(struct expansion_card *ec) +{ + struct ata_host *host = ecard_get_drvdata(ec); + unsigned long flags; + + /* + * Disable interrupts from this card. We need to do + * this before disabling EASI since we may be accessing + * this register via that region. + */ + local_irq_save(flags); + ec->ops->irqdisable(ec, ec->irq); + local_irq_restore(flags); + + /* + * Reset the ROM pointer so that we can read the ROM + * after a soft reboot. This also disables access to + * the IDE taskfile via the EASI region. + */ + if (host) { + struct pata_icside_state *state = host->private_data; + if (state->ioc_base) + writeb(0, state->ioc_base); + } +} + +static void pata_icside_remove(struct expansion_card *ec) +{ + struct ata_host *host = ecard_get_drvdata(ec); + struct pata_icside_state *state = host->private_data; + + ata_host_detach(host); + + pata_icside_shutdown(ec); + + /* + * don't NULL out the drvdata - devres/libata wants it + * to free the ata_host structure. + */ + if (state->dma != NO_DMA) + free_dma(state->dma); + + ecard_release_resources(ec); +} + +static const struct ecard_id pata_icside_ids[] = { + { MANU_ICS, PROD_ICS_IDE }, + { MANU_ICS2, PROD_ICS2_IDE }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver pata_icside_driver = { + .probe = pata_icside_probe, + .remove = pata_icside_remove, + .shutdown = pata_icside_shutdown, + .id_table = pata_icside_ids, + .drv = { + .name = DRV_NAME, + }, +}; + +static int __init pata_icside_init(void) +{ + return ecard_register_driver(&pata_icside_driver); +} + +static void __exit pata_icside_exit(void) +{ + ecard_remove_driver(&pata_icside_driver); +} + +MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ICS PATA driver"); + +module_init(pata_icside_init); +module_exit(pata_icside_exit); diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c new file mode 100644 index 00000000000..af424573c2f --- /dev/null +++ b/drivers/ata/pata_imx.c @@ -0,0 +1,258 @@ +/* + * Freescale iMX PATA driver + * + * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org> + * + * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * TODO: + * - dmaengine support + * - check if timing stuff needed + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#define DRV_NAME "pata_imx" + +#define PATA_IMX_ATA_CONTROL 0x24 +#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7) +#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6) +#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0) +#define PATA_IMX_ATA_INT_EN 0x2C +#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3) +#define PATA_IMX_DRIVE_DATA 0xA0 +#define PATA_IMX_DRIVE_CONTROL 0xD8 + +struct pata_imx_priv { + struct clk *clk; + /* timings/interrupt/control regs */ + void __iomem *host_regs; + u32 ata_ctl; +}; + +static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused) +{ + struct ata_device *dev; + struct ata_port *ap = link->ap; + struct pata_imx_priv *priv = ap->host->private_data; + u32 val; + + ata_for_each_dev(dev, link, ENABLED) { + dev->pio_mode = dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + + val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); + if (ata_pio_need_iordy(dev)) + val |= PATA_IMX_ATA_CTRL_IORDY_EN; + else + val &= ~PATA_IMX_ATA_CTRL_IORDY_EN; + __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL); + + ata_dev_info(dev, "configured for PIO\n"); + } + return 0; +} + +static struct scsi_host_template pata_imx_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations pata_imx_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_unknown, + .set_mode = pata_imx_set_mode, +}; + +static void pata_imx_setup_port(struct ata_ioports *ioaddr) +{ + /* Fixup the port shift for platforms that need it */ + ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2); + ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2); + ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2); + ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2); + ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2); + ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2); + ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2); + ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2); + ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2); + ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); +} + +static int pata_imx_probe(struct platform_device *pdev) +{ + struct ata_host *host; + struct ata_port *ap; + struct pata_imx_priv *priv; + int irq = 0; + struct resource *io_res; + int ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + priv = devm_kzalloc(&pdev->dev, + sizeof(struct pata_imx_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "Failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + host = ata_host_alloc(&pdev->dev, 1); + if (!host) { + ret = -ENOMEM; + goto err; + } + + host->private_data = priv; + ap = host->ports[0]; + + ap->ops = &pata_imx_port_ops; + ap->pio_mask = ATA_PIO0; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res); + if (IS_ERR(priv->host_regs)) { + ret = PTR_ERR(priv->host_regs); + goto err; + } + + 
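/* The drive task file and control block sit at fixed offsets inside + * the controller register window (PATA_IMX_DRIVE_DATA and + * PATA_IMX_DRIVE_CONTROL); pata_imx_setup_port() derives the remaining + * register addresses from cmd_addr. */ +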
ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA; + ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL; + + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + + pata_imx_setup_port(&ap->ioaddr); + + ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", + (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA, + (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL); + + /* deassert resets */ + __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B | + PATA_IMX_ATA_CTRL_ATA_RST_B, + priv->host_regs + PATA_IMX_ATA_CONTROL); + /* enable interrupts */ + __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, + priv->host_regs + PATA_IMX_ATA_INT_EN); + + /* activate */ + ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, + &pata_imx_sht); + + if (ret) + goto err; + + return 0; +err: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static int pata_imx_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct pata_imx_priv *priv = host->private_data; + + ata_host_detach(host); + + __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int pata_imx_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct pata_imx_priv *priv = host->private_data; + int ret; + + ret = ata_host_suspend(host, PMSG_SUSPEND); + if (!ret) { + __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); + priv->ata_ctl = + __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); + clk_disable_unprepare(priv->clk); + } + + return ret; +} + +static int pata_imx_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct pata_imx_priv *priv = host->private_data; + + int ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); + + __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, + priv->host_regs + PATA_IMX_ATA_INT_EN); + + ata_host_resume(host); + + return 0; +} + +static const struct dev_pm_ops pata_imx_pm_ops = { + .suspend = pata_imx_suspend, + .resume = pata_imx_resume, +}; +#endif + +static const struct of_device_id imx_pata_dt_ids[] = { + { + .compatible = "fsl,imx27-pata", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, imx_pata_dt_ids); + +static struct platform_driver pata_imx_driver = { + .probe = pata_imx_probe, + .remove = pata_imx_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = imx_pata_dt_ids, + .owner = THIS_MODULE, +#ifdef CONFIG_PM_SLEEP + .pm = &pata_imx_pm_ops, +#endif + }, +}; + +module_platform_driver(pata_imx_driver); + +MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); +MODULE_DESCRIPTION("low-level driver for iMX PATA"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c index 640b8b0954f..b33d1f99b3a 100644 --- a/drivers/ata/pata_isapnp.c +++ b/drivers/ata/pata_isapnp.c @@ -1,7 +1,7 @@ /* * pata-isapnp.c - ISA PnP PATA controller driver. - * Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved. + * Copyright 2005/2006 Red Hat Inc, all rights reserved. 
* * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru> */ @@ -17,50 +17,22 @@ #include <linux/libata.h> #define DRV_NAME "pata_isapnp" -#define DRV_VERSION "0.1.5" +#define DRV_VERSION "0.2.5" static struct scsi_host_template isapnp_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations isapnp_port_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, +}; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct ata_port_operations isapnp_noalt_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + /* No altstatus so we don't want to use the lost interrupt poll */ + .lost_interrupt = ATA_OP_NULL, }; /** @@ -74,37 +46,55 @@ static struct ata_port_operations isapnp_port_ops = { static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id) { - struct ata_probe_ent ae; + struct ata_host *host; + struct ata_port *ap; + void __iomem *cmd_addr, *ctl_addr; + int irq = 0; + irq_handler_t handler = NULL; if (pnp_port_valid(idev, 0) == 0) return -ENODEV; - /* FIXME: Should selected polled PIO here not fail */ - if (pnp_irq_valid(idev, 0) == 0) - return -ENODEV; + if (pnp_irq_valid(idev, 0)) { + irq = pnp_irq(idev, 0); + handler = ata_sff_interrupt; + } + + /* allocate host */ + host = ata_host_alloc(&idev->dev, 1); + if (!host) + return -ENOMEM; - memset(&ae, 0, sizeof(struct ata_probe_ent)); - INIT_LIST_HEAD(&ae.node); - ae.dev = &idev->dev; - ae.port_ops = &isapnp_port_ops; - ae.sht = &isapnp_sht; - ae.n_ports = 1; - ae.pio_mask = 1; /* ISA so PIO 0 cycles */ - ae.irq = pnp_irq(idev, 0); - ae.irq_flags = 0; - ae.port_flags = ATA_FLAG_SLAVE_POSS; - ae.port[0].cmd_addr = pnp_port_start(idev, 0); - - if (pnp_port_valid(idev, 1) == 0) { - ae.port[0].altstatus_addr = pnp_port_start(idev, 1); - ae.port[0].ctl_addr = pnp_port_start(idev, 1); - ae.port_flags |= ATA_FLAG_SRST; + /* acquire resources and fill host */ + cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8); + if (!cmd_addr) + return -ENOMEM; + + ap = host->ports[0]; + + ap->ops = &isapnp_noalt_port_ops; + ap->pio_mask = ATA_PIO0; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + ap->ioaddr.cmd_addr = cmd_addr; + + if (pnp_port_valid(idev, 1)) { + ctl_addr = devm_ioport_map(&idev->dev, + pnp_port_start(idev, 1), 1); + ap->ioaddr.altstatus_addr = ctl_addr; + ap->ioaddr.ctl_addr = ctl_addr; + ap->ops = &isapnp_port_ops; } - ata_std_ports(&ae.port[0]); - if (ata_device_add(&ae) == 0) - return -ENODEV; - 
return 0; + ata_sff_std_ports(&ap->ioaddr); + + ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", + (unsigned long long)pnp_port_start(idev, 0), + (unsigned long long)pnp_port_start(idev, 1)); + + /* activate */ + return ata_host_activate(host, irq, handler, 0, + &isapnp_sht); } /** @@ -120,8 +110,7 @@ static void isapnp_remove_one(struct pnp_dev *idev) struct device *dev = &idev->dev; struct ata_host *host = dev_get_drvdata(dev); - ata_host_remove(host); - dev_set_drvdata(dev, NULL); + ata_host_detach(host); } static struct pnp_device_id isapnp_devices[] = { @@ -130,6 +119,8 @@ static struct pnp_device_id isapnp_devices[] = { {.id = ""} }; +MODULE_DEVICE_TABLE(pnp, isapnp_devices); + static struct pnp_driver isapnp_driver = { .name = DRV_NAME, .id_table = isapnp_devices, diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c new file mode 100644 index 00000000000..4f97d1e52f8 --- /dev/null +++ b/drivers/ata/pata_it8213.c @@ -0,0 +1,298 @@ +/* + * pata_it8213.c - iTE Tech. Inc. IT8213 PATA driver + * + * The IT8213 is a very Intel ICH like device for timing purposes, having + * a similar register layout and the same split clock arrangement. Cable + * detection is different, and it does not have slave channels or all the + * clutter of later ICH/SATA setups. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/ata.h> + +#define DRV_NAME "pata_it8213" +#define DRV_VERSION "0.0.3" + +/** + * it8213_pre_reset - probe begin + * @link: link + * @deadline: deadline jiffies for the operation + * + * Filter out ports by the enable bits before doing the normal reset + * and probe. + */ + +static int it8213_pre_reset(struct ata_link *link, unsigned long deadline) +{ + static const struct pci_bits it8213_enable_bits[] = { + { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ + }; + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) + return -ENOENT; + + return ata_sff_prereset(link, deadline); +} + +/** + * it8213_cable_detect - check for 40/80 pin + * @ap: Port + * + * Perform cable detection for the 8213 ATA interface. This is + * different to the PIIX arrangement + */ + +static int it8213_cable_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 tmp; + pci_read_config_byte(pdev, 0x42, &tmp); + if (tmp & 2) /* The initial docs are incorrect */ + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} + +/** + * it8213_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: Device whose timings we are configuring + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev) +{ + unsigned int pio = adev->pio_mode - XFER_PIO_0; + struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned int master_port = ap->port_no ? 0x42 : 0x40; + u16 master_data; + int control = 0; + + /* + * See Intel Document 298600-004 for the timing programing rules + * for PIIX/ICH. 
The 8213 is a clone so very similar + */ + + static const /* ISP RTC */ + u8 timings[][2] = { { 0, 0 }, + { 0, 0 }, + { 1, 0 }, + { 2, 1 }, + { 2, 3 }, }; + + if (pio > 1) + control |= 1; /* TIME */ + if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ + control |= 2; /* IE */ + /* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */ + if (adev->class != ATA_DEV_ATA) + control |= 4; /* PPE */ + + pci_read_config_word(dev, master_port, &master_data); + + /* Set PPE, IE, and TIME as appropriate */ + if (adev->devno == 0) { + master_data &= 0xCCF0; + master_data |= control; + master_data |= (timings[pio][0] << 12) | + (timings[pio][1] << 8); + } else { + u8 slave_data; + + master_data &= 0xFF0F; + master_data |= (control << 4); + + /* Slave timing in separate register */ + pci_read_config_byte(dev, 0x44, &slave_data); + slave_data &= 0xF0; + slave_data |= (timings[pio][0] << 2) | timings[pio][1]; + pci_write_config_byte(dev, 0x44, slave_data); + } + + master_data |= 0x4000; /* Ensure SITRE is set */ + pci_write_config_word(dev, master_port, master_data); +} + +/** + * it8213_set_dmamode - Initialize host controller PATA DMA timings + * @ap: Port whose timings we are configuring + * @adev: Device to program + * + * Set UDMA/MWDMA mode for device, in host controller PCI config space. + * This device is basically an ICH alike. + * + * LOCKING: + * None (inherited from caller). + */ + +static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev) +{ + struct pci_dev *dev = to_pci_dev(ap->host->dev); + u16 master_data; + u8 speed = adev->dma_mode; + int devid = adev->devno; + u8 udma_enable; + + static const /* ISP RTC */ + u8 timings[][2] = { { 0, 0 }, + { 0, 0 }, + { 1, 0 }, + { 2, 1 }, + { 2, 3 }, }; + + pci_read_config_word(dev, 0x40, &master_data); + pci_read_config_byte(dev, 0x48, &udma_enable); + + if (speed >= XFER_UDMA_0) { + unsigned int udma = adev->dma_mode - XFER_UDMA_0; + u16 udma_timing; + u16 ideconf; + int u_clock, u_speed; + + /* Clocks follow the PIIX style */ + u_speed = min(2 - (udma & 1), udma); + if (udma > 4) + u_clock = 0x1000; /* 100Mhz */ + else if (udma > 2) + u_clock = 1; /* 66Mhz */ + else + u_clock = 0; /* 33Mhz */ + + udma_enable |= (1 << devid); + + /* Load the UDMA cycle time */ + pci_read_config_word(dev, 0x4A, &udma_timing); + udma_timing &= ~(3 << (4 * devid)); + udma_timing |= u_speed << (4 * devid); + pci_write_config_word(dev, 0x4A, udma_timing); + + /* Load the clock selection */ + pci_read_config_word(dev, 0x54, &ideconf); + ideconf &= ~(0x1001 << devid); + ideconf |= u_clock << devid; + pci_write_config_word(dev, 0x54, ideconf); + } else { + /* + * MWDMA is driven by the PIO timings. We must also enable + * IORDY unconditionally along with TIME1. PPE has already + * been set when the PIO timing was set. 
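+ * If the drive's programmed PIO mode is slower than the PIO timing + * this MWDMA mode needs, the DMAONLY bit is set below so the faster + * timing applies to DMA cycles only and PIO falls back to PIO0.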
+ */ + unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; + unsigned int control; + u8 slave_data; + static const unsigned int needed_pio[3] = { + XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 + }; + int pio = needed_pio[mwdma] - XFER_PIO_0; + + control = 3; /* IORDY|TIME1 */ + + /* If the drive MWDMA is faster than it can do PIO then + we must force PIO into PIO0 */ + + if (adev->pio_mode < needed_pio[mwdma]) + /* Enable DMA timing only */ + control |= 8; /* PIO cycles in PIO0 */ + + if (devid) { /* Slave */ + master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ + master_data |= control << 4; + pci_read_config_byte(dev, 0x44, &slave_data); + slave_data &= 0xF0; + /* Load the matching timing */ + slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); + pci_write_config_byte(dev, 0x44, slave_data); + } else { /* Master */ + master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY + and master timing bits */ + master_data |= control; + master_data |= + (timings[pio][0] << 12) | + (timings[pio][1] << 8); + } + udma_enable &= ~(1 << devid); + pci_write_config_word(dev, 0x40, master_data); + } + pci_write_config_byte(dev, 0x48, udma_enable); +} + +static struct scsi_host_template it8213_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + + +static struct ata_port_operations it8213_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = it8213_cable_detect, + .set_piomode = it8213_set_piomode, + .set_dmamode = it8213_set_dmamode, + .prereset = it8213_pre_reset, +}; + + +/** + * it8213_init_one - Register 8213 ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in it8213_pci_tbl matching with @pdev + * + * Called from kernel PCI layer. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. 
+ */ + +static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, + .udma_mask = ATA_UDMA6, + .port_ops = &it8213_ops, + }; + /* Current IT8213 stuff is single port */ + const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0); +} + +static const struct pci_device_id it8213_pci_tbl[] = { + { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), }, + + { } /* terminate list */ +}; + +static struct pci_driver it8213_pci_driver = { + .name = DRV_NAME, + .id_table = it8213_pci_tbl, + .probe = it8213_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(it8213_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, it8213_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 82a46ff4000..a5088ecb349 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -1,7 +1,8 @@ /* - * ata-it821x.c - IT821x PATA for new ATA layer + * pata_it821x.c - IT821x PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> + * (C) 2007 Bartlomiej Zolnierkiewicz * * based upon * @@ -9,13 +10,13 @@ * * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004 * - * Copyright (C) 2004 Red Hat <alan@redhat.com> + * Copyright (C) 2004 Red Hat * * May be copied or modified under the terms of the GNU General Public License * Based in part on the ITE vendor provided SCSI driver. * - * Documentation available from - * http://www.ite.com.tw/pc/IT8212F_V04.pdf + * Documentation available from IT8212F_V04.pdf + * http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91 * Some other documents are NDA. * * The ITE8212 isn't exactly a standard IDE controller. It has two @@ -65,22 +66,21 @@ * * TODO * - ATAPI and other speed filtering - * - Command filter in smart mode * - RAID configuration ioctls */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/slab.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_it821x" -#define DRV_VERSION "0.3.2" +#define DRV_VERSION "0.4.2" struct it821x_dev { @@ -105,7 +105,7 @@ struct it821x_dev /* * We allow users to force the card into non raid mode without - * flashing the alternative BIOS. This is also neccessary right now + * flashing the alternative BIOS. This is also necessary right now * for embedded platforms that cannot run a PC BIOS but are using this * device. 
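* (Users ask for this with the driver's noraid=1 module parameter.)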
*/ @@ -113,31 +113,6 @@ struct it821x_dev static int it8212_noraid; /** - * it821x_pre_reset - probe - * @ap: ATA port - * - * Set the cable type - */ - -static int it821x_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); -} - -/** - * it821x_error_handler - probe/reset - * @ap: ATA port - * - * Set the cable type and trigger a probe - */ - -static void it821x_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, it821x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - -/** * it821x_program - program the PIO/MWDMA registers * @ap: ATA port * @adev: Device to program @@ -408,7 +383,7 @@ static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc) * @ap: ATA port * @device: Device number (not pointer) * - * Device selection hook. If neccessary perform clock switching + * Device selection hook. If necessary perform clock switching */ static void it821x_passthru_dev_select(struct ata_port *ap, @@ -416,15 +391,15 @@ static void it821x_passthru_dev_select(struct ata_port *ap, { struct it821x_dev *itdev = ap->private_data; if (itdev && device != itdev->last_device) { - struct ata_device *adev = &ap->device[device]; + struct ata_device *adev = &ap->link.device[device]; it821x_program(ap, adev, itdev->pio[adev->devno]); itdev->last_device = device; } - ata_std_dev_select(ap, device); + ata_sff_dev_select(ap, device); } /** - * it821x_smart_qc_issue_prot - wrap qc issue prot + * it821x_smart_qc_issue - wrap qc issue prot * @qc: command * * Wrap the command issue sequence for the IT821x. We need to @@ -432,7 +407,7 @@ static void it821x_passthru_dev_select(struct ata_port *ap, * usual happenings kick off */ -static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) { switch(qc->tf.command) { @@ -450,16 +425,18 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) case ATA_CMD_WRITE_MULTI: case ATA_CMD_WRITE_MULTI_EXT: case ATA_CMD_ID_ATA: + case ATA_CMD_INIT_DEV_PARAMS: + case 0xFC: /* Internal 'report rebuild state' */ /* Arguably should just no-op this one */ case ATA_CMD_SET_FEATURES: - return ata_qc_issue_prot(qc); + return ata_bmdma_qc_issue(qc); } printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); - return AC_ERR_INVALID; + return AC_ERR_DEV; } /** - * it821x_passthru_qc_issue_prot - wrap qc issue prot + * it821x_passthru_qc_issue - wrap qc issue prot * @qc: command * * Wrap the command issue sequence for the IT821x. We need to @@ -467,15 +444,16 @@ static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc) * usual happenings kick off */ -static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) { it821x_passthru_dev_select(qc->ap, qc->dev->devno); - return ata_qc_issue_prot(qc); + return ata_bmdma_qc_issue(qc); } /** * it821x_smart_set_mode - mode setting - * @ap: interface to set up + * @link: interface to set up + * @unused: device that failed (error only) * * Use a non standard set_mode function. We don't want to be tuned. * The BIOS configured everything. Our job is not to fiddle. We @@ -483,84 +461,108 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) * and respect them. 
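* Whether a drive is reported as DMA or PIO capable is taken from its * own identify data via ata_id_has_dma().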
*/ -static void it821x_smart_set_mode(struct ata_port *ap) +static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused) { - int dma_enabled = 0; - int i; - - /* Bits 5 and 6 indicate if DMA is active on master/slave */ - /* It is possible that BMDMA isn't allocated */ - if (ap->ioaddr.bmdma_addr) - dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev)) { - /* We don't really care */ - dev->pio_mode = XFER_PIO_0; - dev->dma_mode = XFER_MW_DMA_0; - /* We do need the right mode information for DMA or PIO - and this comes from the current configuration flags */ - if (dma_enabled & (1 << (5 + i))) { - dev->xfer_mode = XFER_MW_DMA_0; - dev->xfer_shift = ATA_SHIFT_MWDMA; - dev->flags &= ~ATA_DFLAG_PIO; - } else { - dev->xfer_mode = XFER_PIO_0; - dev->xfer_shift = ATA_SHIFT_PIO; - dev->flags |= ATA_DFLAG_PIO; - } + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + /* We don't really care */ + dev->pio_mode = XFER_PIO_0; + dev->dma_mode = XFER_MW_DMA_0; + /* We do need the right mode information for DMA or PIO + and this comes from the current configuration flags */ + if (ata_id_has_dma(dev->id)) { + ata_dev_info(dev, "configured for DMA\n"); + dev->xfer_mode = XFER_MW_DMA_0; + dev->xfer_shift = ATA_SHIFT_MWDMA; + dev->flags &= ~ATA_DFLAG_PIO; + } else { + ata_dev_info(dev, "configured for PIO\n"); + dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; } } + return 0; } /** * it821x_dev_config - Called each device identify - * @ap: ATA port * @adev: Device that has just been identified * * Perform the initial setup needed for each device that is chip * special. In our case we need to lock the sector count to avoid * blowing the brains out of the firmware with large LBA48 requests * - * FIXME: When FUA appears we need to block FUA too. And SMART and - * basically we need to filter commands for this chip. */ -static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev) +static void it821x_dev_config(struct ata_device *adev) { - unsigned char model_num[40]; - char *s; - unsigned int len; - - /* This block ought to be a library routine as it is in several - drivers now */ - - ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, - sizeof(model_num)); - s = &model_num[0]; - len = strnlen(s, sizeof(model_num)); - - /* ATAPI specifies that empty space is blank-filled; remove blanks */ - while ((len > 0) && (s[len - 1] == ' ')) { - len--; - s[len] = 0; - } + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + + ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (adev->max_sectors > 255) adev->max_sectors = 255; if (strstr(model_num, "Integrated Technology Express")) { /* RAID mode */ - printk(KERN_INFO "IT821x %sRAID%d volume", - adev->id[147]?"Bootable ":"", - adev->id[129]); + ata_dev_info(adev, "%sRAID%d volume", + adev->id[147] ? "Bootable " : "", + adev->id[129]); if (adev->id[129] != 1) - printk("(%dK stripe)", adev->id[146]); - printk(".\n"); + pr_cont("(%dK stripe)", adev->id[146]); + pr_cont("\n"); } + /* This is a controller firmware triggered funny, don't + report the drive faulty! 
*/ + adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC; + /* No HPA in 'smart' mode */ + adev->horkage |= ATA_HORKAGE_BROKEN_HPA; } +/** + * it821x_read_id - Hack identify data up + * @adev: device to read + * @tf: proposed taskfile + * @id: buffer for returned ident data + * + * Query the devices on this firmware driven port and slightly + * mash the identify data to stop us and common tools trying to + * use features not firmware supported. The firmware itself does + * some masking (eg SMART) but not enough. + */ + +static unsigned int it821x_read_id(struct ata_device *adev, + struct ata_taskfile *tf, u16 *id) +{ + unsigned int err_mask; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + + err_mask = ata_do_dev_read_id(adev, tf, id); + if (err_mask) + return err_mask; + ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num)); + + id[83] &= ~(1 << 12); /* Cache flush is firmware handled */ + id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */ + id[84] &= ~(1 << 6); /* No FUA */ + id[85] &= ~(1 << 10); /* No HPA */ + id[76] = 0; /* No NCQ/AN etc */ + + if (strstr(model_num, "Integrated Technology Express")) { + /* Set feature bits the firmware neglects */ + id[49] |= 0x0300; /* LBA, DMA */ + id[83] &= 0x7FFF; + id[83] |= 0x4400; /* Word 83 is valid and LBA48 */ + id[86] |= 0x0400; /* LBA48 on */ + id[ATA_ID_MAJOR_VER] |= 0x1F; + /* Clear the serial number because it's different each boot + which breaks validation on resume */ + memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN); + } + return err_mask; +} /** * it821x_check_atapi_dma - ATAPI DMA handler @@ -575,6 +577,10 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct it821x_dev *itdev = ap->private_data; + /* Only use dma for transfers to/from the media. */ + if (ata_qc_raw_nbytes(qc) < 2048) + return -EOPNOTSUPP; + /* No ATAPI DMA in smart mode */ if (itdev->smart) return -EOPNOTSUPP; @@ -585,6 +591,136 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) return 0; } +/** + * it821x_display_disk - display disk setup + * @n: Device number + * @buf: Buffer block from firmware + * + * Produce a nice informative display of the device setup as provided + * by the firmware. + */ + +static void it821x_display_disk(int n, u8 *buf) +{ + unsigned char id[41]; + int mode = 0; + char *mtype = ""; + char mbuf[8]; + char *cbl = "(40 wire cable)"; + + static const char *types[5] = { + "RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK" + }; + + if (buf[52] > 4) /* No Disk */ + return; + + ata_id_c_string((u16 *)buf, id, 0, 41); + + if (buf[51]) { + mode = ffs(buf[51]); + mtype = "UDMA"; + } else if (buf[49]) { + mode = ffs(buf[49]); + mtype = "MWDMA"; + } + + if (buf[76]) + cbl = ""; + + if (mode) + snprintf(mbuf, 8, "%5s%d", mtype, mode - 1); + else + strcpy(mbuf, "PIO"); + if (buf[52] == 4) + printk(KERN_INFO "%d: %-6s %-8s %s %s\n", + n, mbuf, types[buf[52]], id, cbl); + else + printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n", + n, mbuf, types[buf[52]], buf[53], id, cbl); + if (buf[125] < 100) + printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); +} + +/** + * it821x_firmware_command - issue firmware command + * @ap: IT821x port to interrogate + * @cmd: command + * @len: length + * + * Issue firmware commands expecting data back from the controller. We + * use this to issue commands that do not go via the normal paths. Other + * commands such as 0xFC can be issued normally. 
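+ * Returns a kmalloc()'d buffer of @len bytes that the caller must + * kfree(), or NULL on error or timeout.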
+ */ + +static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) +{ + u8 status; + int n = 0; + u16 *buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) { + printk(KERN_ERR "it821x_firmware_command: Out of memory\n"); + return NULL; + } + /* This isn't quite a normal ATA command as we are talking to the + firmware not the drives */ + ap->ctl |= ATA_NIEN; + iowrite8(ap->ctl, ap->ioaddr.ctl_addr); + ata_wait_idle(ap); + iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr); + iowrite8(cmd, ap->ioaddr.command_addr); + udelay(1); + /* This should be almost immediate but a little paranoia goes a long + way. */ + while(n++ < 10) { + status = ioread8(ap->ioaddr.status_addr); + if (status & ATA_ERR) { + kfree(buf); + printk(KERN_ERR "it821x_firmware_command: rejected\n"); + return NULL; + } + if (status & ATA_DRQ) { + ioread16_rep(ap->ioaddr.data_addr, buf, len/2); + return (u8 *)buf; + } + mdelay(1); + } + kfree(buf); + printk(KERN_ERR "it821x_firmware_command: timeout\n"); + return NULL; +} + +/** + * it821x_probe_firmware - firmware reporting/setup + * @ap: IT821x port being probed + * + * Probe the firmware of the controller by issuing firmware command + * 0xFA and analysing the returned data. + */ + +static void it821x_probe_firmware(struct ata_port *ap) +{ + u8 *buf; + int i; + + /* This is a bit ugly as we can't just issue a task file to a device + as this is controller magic */ + + buf = it821x_firmware_command(ap, 0xFA, 512); + + if (buf != NULL) { + printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n", + buf[505], + buf[506], + buf[507], + buf[508]); + for (i = 0; i < 4; i++) + it821x_display_disk(i, buf + 128 * i); + kfree(buf); + } +} + + /** * it821x_port_start - port setup @@ -602,18 +738,14 @@ static int it821x_port_start(struct ata_port *ap) struct it821x_dev *itdev; u8 conf; - int ret = ata_port_start(ap); + int ret = ata_bmdma_port_start(ap); if (ret < 0) return ret; - ap->private_data = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL); - if (ap->private_data == NULL) { - ata_port_stop(ap); + itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL); + if (itdev == NULL) return -ENOMEM; - } - - itdev = ap->private_data; - memset(itdev, 0, sizeof(struct it821x_dev)); + ap->private_data = itdev; pci_read_config_byte(pdev, 0x50, &conf); @@ -622,6 +754,8 @@ static int it821x_port_start(struct ata_port *ap) /* Long I/O's although allowed in LBA48 space cause the onboard firmware to enter the twighlight zone */ /* No ATAPI DMA in this mode either */ + if (ap->port_no == 0) + it821x_probe_firmware(ap); } /* Pull the current clocks from 0x50 */ if (conf & (1 << (1 + ap->port_no))) @@ -633,8 +767,7 @@ static int it821x_port_start(struct ata_port *ap) itdev->want[1][1] = ATA_ANY; itdev->last_device = -1; - pci_read_config_byte(pdev, PCI_REVISION_ID, &conf); - if (conf == 0x10) { + if (pdev->revision == 0x10) { itdev->timing10 = 1; /* Need to disable ATAPI DMA for this case */ if (!itdev->smart) @@ -645,113 +778,81 @@ static int it821x_port_start(struct ata_port *ap) } /** - * it821x_port_stop - port shutdown - * @ap: ATA port being removed + * it821x_rdc_cable - Cable detect for RDC1010 + * @ap: port we are checking * - * Release the private objects we added in it821x_port_start + * Return the RDC1010 cable type. Unlike the IT821x we know how to do + * this and can do host side cable detect */ -static void it821x_port_stop(struct ata_port *ap) { - kfree(ap->private_data); - ap->private_data = NULL; /* We want an OOPS if we reuse this - too late! 
*/ - ata_port_stop(ap); +static int it821x_rdc_cable(struct ata_port *ap) +{ + u16 r40; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + + pci_read_config_word(pdev, 0x40, &r40); + if (r40 & (1 << (2 + ap->port_no))) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; } static struct scsi_host_template it821x_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - /* 255 sectors to begin with. This is locked in smart mode but not - in pass through */ - .max_sectors = 255, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations it821x_smart_port_ops = { - .set_mode = it821x_smart_set_mode, - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .mode_filter = ata_pci_default_filter, + .inherits = &ata_bmdma_port_ops, - .check_status = ata_check_status, .check_atapi_dma= it821x_check_atapi_dma, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .dev_config = it821x_dev_config, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = it821x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = it821x_smart_qc_issue_prot, + .qc_issue = it821x_smart_qc_issue, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + .cable_detect = ata_cable_80wire, + .set_mode = it821x_smart_set_mode, + .dev_config = it821x_dev_config, + .read_id = it821x_read_id, .port_start = it821x_port_start, - .port_stop = it821x_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations it821x_passthru_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma= it821x_check_atapi_dma, + .sff_dev_select = it821x_passthru_dev_select, + .bmdma_start = it821x_passthru_bmdma_start, + .bmdma_stop = it821x_passthru_bmdma_stop, + .qc_issue = it821x_passthru_qc_issue, + + .cable_detect = ata_cable_unknown, .set_piomode = it821x_passthru_set_piomode, .set_dmamode = it821x_passthru_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .check_atapi_dma= it821x_check_atapi_dma, - .dev_select = it821x_passthru_dev_select, + .port_start = it821x_port_start, +}; - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = it821x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, +static struct ata_port_operations it821x_rdc_port_ops = { + .inherits = &ata_bmdma_port_ops, - .bmdma_setup = ata_bmdma_setup, + .check_atapi_dma= it821x_check_atapi_dma, + .sff_dev_select = it821x_passthru_dev_select, .bmdma_start = it821x_passthru_bmdma_start, .bmdma_stop = it821x_passthru_bmdma_stop, - .bmdma_status = ata_bmdma_status, + .qc_issue = it821x_passthru_qc_issue, - .qc_prep = ata_qc_prep, - .qc_issue = it821x_passthru_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_clear = 
ata_bmdma_irq_clear, - .irq_handler = ata_interrupt, + .cable_detect = it821x_rdc_cable, + .set_piomode = it821x_passthru_set_piomode, + .set_dmamode = it821x_passthru_set_dmamode, .port_start = it821x_port_start, - .port_stop = it821x_port_stop, - .host_stop = ata_host_stop }; -static void __devinit it821x_disable_raid(struct pci_dev *pdev) +static void it821x_disable_raid(struct pci_dev *pdev) { + /* Neither the RDC nor the IT8211 */ + if (pdev->vendor != PCI_VENDOR_ID_ITE || + pdev->device != PCI_DEVICE_ID_ITE_8212) + return; + /* Reset local CPU, and set BIOS not ready */ pci_write_config_byte(pdev, 0x5E, 0x01); @@ -772,66 +873,105 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { u8 conf; - static struct ata_port_info info_smart = { - .sht = &it821x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info_smart = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &it821x_smart_port_ops }; - static struct ata_port_info info_passthru = { - .sht = &it821x_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, + static const struct ata_port_info info_passthru = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &it821x_passthru_port_ops }; - static struct ata_port_info *port_info[2]; + static const struct ata_port_info info_rdc = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &it821x_rdc_port_ops + }; + static const struct ata_port_info info_rdc_11 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + /* No UDMA */ + .port_ops = &it821x_rdc_port_ops + }; + const struct ata_port_info *ppi[] = { NULL, NULL }; static char *mode[2] = { "pass through", "smart" }; + int rc; - /* Force the card into bypass mode if so requested */ - if (it8212_noraid) { - printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); - it821x_disable_raid(pdev); - } - pci_read_config_byte(pdev, 0x50, &conf); - conf &= 1; + rc = pcim_enable_device(pdev); + if (rc) + return rc; - printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]); - if (conf == 0) - port_info[0] = port_info[1] = &info_passthru; - else - port_info[0] = port_info[1] = &info_smart; + if (pdev->vendor == PCI_VENDOR_ID_RDC) { + /* Deal with Vortex86SX */ + if (pdev->revision == 0x11) + ppi[0] = &info_rdc_11; + else + ppi[0] = &info_rdc; + } else { + /* Force the card into bypass mode if so requested */ + if (it8212_noraid) { + printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); + it821x_disable_raid(pdev); + } + pci_read_config_byte(pdev, 0x50, &conf); + conf &= 1; - return ata_pci_init_one(pdev, port_info, 2); + printk(KERN_INFO DRV_NAME": controller in %s mode.\n", + mode[conf]); + if (conf == 0) + ppi[0] = &info_passthru; + else + ppi[0] = &info_smart; + } + return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int it821x_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + /* Resume - turn raid back off if need be */ + if (it8212_noraid) + it821x_disable_raid(pdev); + ata_host_resume(host); + return rc; } +#endif + +static const struct pci_device_id 
it821x[] = { + { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, + { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, + { PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), }, -static struct pci_device_id it821x[] = { - { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211), }, - { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212), }, - { 0, }, + { }, }; static struct pci_driver it821x_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = it821x, .probe = it821x_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = it821x_reinit_one, +#endif }; -static int __init it821x_init(void) -{ - return pci_register_driver(&it821x_pci_driver); -} - - -static void __exit it821x_exit(void) -{ - pci_unregister_driver(&it821x_pci_driver); -} - +module_pci_driver(it821x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller"); @@ -839,9 +979,5 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, it821x); MODULE_VERSION(DRV_VERSION); - module_param_named(noraid, it8212_noraid, int, S_IRUGO); -MODULE_PARM_DESC(it8212_noraid, "Force card into bypass mode"); - -module_init(it821x_init); -module_exit(it821x_exit); +MODULE_PARM_DESC(noraid, "Force card into bypass mode"); diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c new file mode 100644 index 00000000000..ddf470c2341 --- /dev/null +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -0,0 +1,208 @@ +/* + * ixp4xx PATA/Compact Flash driver + * Copyright (C) 2006-07 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * An ATA driver to handle a Compact Flash connected + * to the ixp4xx expansion bus in TrueIDE mode. The CF + * must have its chip selects connected to two CS lines + * on the ixp4xx. If the irq is not available, you might + * want to modify both this driver and libata to run in + * polling mode. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/libata.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <scsi/scsi_host.h> + +#define DRV_NAME "pata_ixp4xx_cf" +#define DRV_VERSION "0.2" + +static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error) +{ + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + ata_dev_info(dev, "configured for PIO0\n"); + dev->pio_mode = XFER_PIO_0; + dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + } + return 0; +} + +static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw) +{ + unsigned int i; + unsigned int words = buflen >> 1; + u16 *buf16 = (u16 *) buf; + struct ata_port *ap = dev->link->ap; + void __iomem *mmio = ap->ioaddr.data_addr; + struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev); + + /* set the expansion bus in 16bit mode and restore + * 8 bit mode after the transaction. + */ + *data->cs0_cfg &= ~(0x01); + udelay(100); + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + for (i = 0; i < words; i++) + buf16[i] = readw(mmio); + else + for (i = 0; i < words; i++) + writew(buf16[i], mmio); + + /* Transfer trailing 1 byte, if any.
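The expansion bus is + * still in 16-bit mode here, so the odd byte still travels as a full + * 16-bit word (zero-padded on writes).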
*/ + if (unlikely(buflen & 0x01)) { + u16 align_buf[1] = { 0 }; + unsigned char *trailing_buf = buf + buflen - 1; + + if (rw == READ) { + align_buf[0] = readw(mmio); + memcpy(trailing_buf, align_buf, 1); + } else { + memcpy(align_buf, trailing_buf, 1); + writew(align_buf[0], mmio); + } + words++; + } + + udelay(100); + *data->cs0_cfg |= 0x01; + + return words << 1; +} + +static struct scsi_host_template ixp4xx_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations ixp4xx_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ixp4xx_mmio_data_xfer, + .cable_detect = ata_cable_40wire, + .set_mode = ixp4xx_set_mode, +}; + +static void ixp4xx_setup_port(struct ata_port *ap, + struct ixp4xx_pata_data *data, + unsigned long raw_cs0, unsigned long raw_cs1) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned long raw_cmd = raw_cs0; + unsigned long raw_ctl = raw_cs1 + 0x06; + + ioaddr->cmd_addr = data->cs0; + ioaddr->altstatus_addr = data->cs1 + 0x06; + ioaddr->ctl_addr = data->cs1 + 0x06; + + ata_sff_std_ports(ioaddr); + +#ifndef __ARMEB__ + + /* adjust the addresses to handle the address swizzling of the + * ixp4xx in little endian mode. + */ + + *(unsigned long *)&ioaddr->data_addr ^= 0x02; + *(unsigned long *)&ioaddr->cmd_addr ^= 0x03; + *(unsigned long *)&ioaddr->altstatus_addr ^= 0x03; + *(unsigned long *)&ioaddr->ctl_addr ^= 0x03; + *(unsigned long *)&ioaddr->error_addr ^= 0x03; + *(unsigned long *)&ioaddr->feature_addr ^= 0x03; + *(unsigned long *)&ioaddr->nsect_addr ^= 0x03; + *(unsigned long *)&ioaddr->lbal_addr ^= 0x03; + *(unsigned long *)&ioaddr->lbam_addr ^= 0x03; + *(unsigned long *)&ioaddr->lbah_addr ^= 0x03; + *(unsigned long *)&ioaddr->device_addr ^= 0x03; + *(unsigned long *)&ioaddr->status_addr ^= 0x03; + *(unsigned long *)&ioaddr->command_addr ^= 0x03; + + raw_cmd ^= 0x03; + raw_ctl ^= 0x03; +#endif + + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl); +} + +static int ixp4xx_pata_probe(struct platform_device *pdev) +{ + unsigned int irq; + struct resource *cs0, *cs1; + struct ata_host *host; + struct ata_port *ap; + struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev); + int ret; + + cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + + if (!cs0 || !cs1) + return -EINVAL; + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) + return -ENOMEM; + + /* acquire resources and fill host */ + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000); + data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000); + + if (!data->cs0 || !data->cs1) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq) + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); + + /* Setup expansion bus chip selects */ + *data->cs0_cfg = data->cs0_bits; + *data->cs1_cfg = data->cs1_bits; + + ap = host->ports[0]; + + ap->ops = &ixp4xx_port_ops; + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_NO_ATAPI; + + ixp4xx_setup_port(ap, data, cs0->start, cs1->start); + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + /* activate host */ + return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht); +} + +static struct platform_driver ixp4xx_pata_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ixp4xx_pata_probe, + .remove = ata_platform_remove_one, +}; + +module_platform_driver(ixp4xx_pata_platform_driver); + +MODULE_AUTHOR("Alessandro 
Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index be3a866b111..4d1a5d2c428 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -4,13 +4,12 @@ * driven by AHCI in the usual configuration although * this driver can handle other setups if we need it. * - * (c) 2006 Red Hat <alan@redhat.com> + * (c) 2006 Red Hat */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -19,7 +18,7 @@ #include <linux/ata.h> #define DRV_NAME "pata_jmicron" -#define DRV_VERSION "0.1.2" +#define DRV_VERSION "0.1.5" typedef enum { PORT_PATA0 = 0, @@ -29,18 +28,19 @@ typedef enum { /** * jmicron_pre_reset - check for 40/80 pin - * @ap: Port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. - + * * On the Jmicron 361/363 there is a single PATA port that can be mapped * either as primary or secondary (or neither). We don't do any policy * and setup here. We assume that has been done by init_one and the * BIOS. */ - -static int jmicron_pre_reset(struct ata_port *ap) +static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 control; u32 control5; @@ -79,11 +79,10 @@ static int jmicron_pre_reset(struct ata_port *ap) * actually do our cable checking etc. Thankfully we don't need * to do the plumbing for other cases. */ - switch (port_map[port]) - { + switch (port_map[port]) { case PORT_PATA0: - if (control & (1 << 5)) - return 0; + if ((control & (1 << 5)) == 0) + return -ENOENT; if (control & (1 << 3)) /* 40/80 pin primary */ ap->cbl = ATA_CBL_PATA40; else @@ -92,7 +91,7 @@ static int jmicron_pre_reset(struct ata_port *ap) case PORT_PATA1: /* Bit 21 is set if the port is enabled */ if ((control5 & (1 << 21)) == 0) - return 0; + return -ENOENT; if (control5 & (1 << 19)) /* 40/80 pin secondary */ ap->cbl = ATA_CBL_PATA40; else @@ -102,76 +101,18 @@ static int jmicron_pre_reset(struct ata_port *ap) ap->cbl = ATA_CBL_SATA; break; } - return ata_std_prereset(ap); -} - -/** - * jmicron_error_handler - Setup and error handler - * @ap: Port to handle - * - * LOCKING: - * None (inherited from caller). 
- */ - -static void jmicron_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /* No PIO or DMA methods needed for this device */ static struct scsi_host_template jmicron_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - /* Special handling needed if you have sector or LBA48 limits */ - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - /* Use standard CHS mapping rules */ - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations jmicron_ops = { - .port_disable = ata_port_disable, - - /* Task file is PCI ATA format, use helpers */ - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = jmicron_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - /* BMDMA handling is PCI ATA format, use helpers */ - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - /* IRQ-related hooks */ - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - /* Generic PATA PCI ATA helpers */ - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, +static struct ata_port_operations jmicron_ops = { + .inherits = &ata_bmdma_port_ops, + .prereset = jmicron_pre_reset, }; @@ -191,49 +132,23 @@ static const struct ata_port_operations jmicron_ops = { static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &jmicron_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &jmicron_ops, }; - struct ata_port_info *port_info[2] = { &info, &info }; - - u32 reg; - - if (id->driver_data != 368) { - /* Put the controller into AHCI mode in case the AHCI driver - has not yet been loaded. 
This can be done with either - function present */ + const struct ata_port_info *ppi[] = { &info, NULL }; - /* FIXME: We may want a way to override this in future */ - pci_write_config_byte(pdev, 0x41, 0xa1); - } - - /* PATA controller is fn 1, AHCI is fn 0 */ - if (PCI_FUNC(pdev->devfn) != 1) - return -ENODEV; - - if ( id->driver_data == 365 || id->driver_data == 366) { - /* The 365/66 have two PATA channels, redirect the second */ - pci_read_config_dword(pdev, 0x80, ®); - reg |= (1 << 24); /* IDE1 to PATA IDE secondary */ - pci_write_config_dword(pdev, 0x80, reg); - } - - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } static const struct pci_device_id jmicron_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361}, - { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363}, - { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365}, - { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366}, - { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368}, + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 }, { } /* terminate list */ }; @@ -242,20 +157,13 @@ static struct pci_driver jmicron_pci_driver = { .id_table = jmicron_pci_tbl, .probe = jmicron_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init jmicron_init(void) -{ - return pci_register_driver(&jmicron_pci_driver); -} - -static void __exit jmicron_exit(void) -{ - pci_unregister_driver(&jmicron_pci_driver); -} - -module_init(jmicron_init); -module_exit(jmicron_exit); +module_pci_driver(jmicron_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports"); diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 10231ef731d..bce2a8ca467 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -1,6 +1,6 @@ /* * pata-legacy.c - Legacy port PATA/SATA controller driver. - * Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved. + * Copyright 2005/2006 Red Hat, all rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,10 +25,16 @@ * http://www.ryston.cz/petr/vlb/pdc20230b.html * http://www.ryston.cz/petr/vlb/pdc20230c.html * http://www.ryston.cz/petr/vlb/pdc20630.html + * QDI65x0: + * http://www.ryston.cz/petr/vlb/qd6500.html + * http://www.ryston.cz/petr/vlb/qd6580.html + * + * QDI65x0 probe code based on drivers/ide/legacy/qd65xx.c + * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by + * Samuel Thibault <samuel.thibault@ens-lyon.org> * * Unsupported but docs exist: * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220 - * Winbond W83759A * * This driver handles legacy (that is "ISA/VLB side") IDE ports found * on PC class systems. There are three hybrid devices that are exceptions @@ -36,7 +42,10 @@ * the MPIIX where the tuning is PCI side but the IDE is "ISA side". * * Specific support is included for the ht6560a/ht6560b/opti82c611a/ - * opti82c465mv/promise 20230c/20630 + * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A + * + * Support for the Winbond 83759A when operating in advanced mode. + * Multichip mode is not currently supported. 
 *
 * Use the autospeed and pio_mask options with:
 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
@@ -47,11 +56,9 @@
 * For now use autospeed and pio_mask as above with the W83759A. This may
 * change.
 *
- * TODO
- * Merge existing pata_qdi driver
- *
 */

+#include <linux/async.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -64,38 +71,139 @@
 #include <linux/platform_device.h>

 #define DRV_NAME "pata_legacy"
-#define DRV_VERSION "0.5.3"
+#define DRV_VERSION "0.6.5"

 #define NR_HOST 6

-static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
+static int all;
+module_param(all, int, 0444);
+MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
+
+enum controller {
+	BIOS = 0,
+	SNOOP = 1,
+	PDC20230 = 2,
+	HT6560A = 3,
+	HT6560B = 4,
+	OPTI611A = 5,
+	OPTI46X = 6,
+	QDI6500 = 7,
+	QDI6580 = 8,
+	QDI6580DP = 9,		/* Dual channel mode is different */
+	W83759A = 10,
+
+	UNKNOWN = -1
+};

 struct legacy_data {
 	unsigned long timing;
 	u8 clock[2];
 	u8 last;
 	int fast;
+	enum controller type;
 	struct platform_device *platform_dev;
+};
+
+struct legacy_probe {
+	unsigned char *name;
+	unsigned long port;
+	unsigned int irq;
+	unsigned int slot;
+	enum controller type;
+	unsigned long private;
+};

+struct legacy_controller {
+	const char *name;
+	struct ata_port_operations *ops;
+	unsigned int pio_mask;
+	unsigned int flags;
+	unsigned int pflags;
+	int (*setup)(struct platform_device *, struct legacy_probe *probe,
+		struct legacy_data *data);
 };

+static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+
+static struct legacy_probe probe_list[NR_HOST];
 static struct legacy_data legacy_data[NR_HOST];
 static struct ata_host *legacy_host[NR_HOST];
 static int nr_legacy_host;

-static int probe_all;		/* Set to check all ISA port ranges */
-static int ht6560a;		/* HT 6560A on primary 1, secondary 2, both 3 */
-static int ht6560b;		/* HT 6560A on primary 1, secondary 2, both 3 */
-static int opti82c611a;		/* Opti82c611A on primary 1, secondary 2, both 3 */
-static int opti82c46x;		/* Opti 82c465MV present (pri/sec autodetect) */
-static int autospeed;		/* Chip present which snoops speed changes */
-static int pio_mask = 0x1F;	/* PIO range for autospeed devices */
+static int probe_all;		/* Set to check all ISA port ranges */
+static int ht6560a;		/* HT 6560A on primary 1, second 2, both 3 */
+static int ht6560b;		/* HT 6560B on primary 1, second 2, both 3 */
+static int opti82c611a;		/* Opti82c611A on primary 1, sec 2, both 3 */
+static int opti82c46x;		/* Opti 82c465MV present (pri/sec autodetect) */
+static int autospeed;		/* Chip present which snoops speed changes */
+static int pio_mask = ATA_PIO4;	/* PIO range for autospeed devices */
+static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
+
+/* Set to probe QDI controllers */
+#ifdef CONFIG_PATA_QDI_MODULE
+static int qdi = 1;
+#else
+static int qdi;
+#endif
+
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
+static int winbond = 1;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#else
+static int winbond;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#endif
+
+/**
+ *	legacy_probe_add	-	Add interface to probe list
+ *	@port: Controller port
+ *	@irq: IRQ number
+ *	@type: Controller type
+ *	@private: Controller specific info
+ *
+ *	Add an entry into the probe list for ATA controllers.
This is used + * to add the default ISA slots and then to build up the table + * further according to other ISA/VLB/Weird device scans + * + * An I/O port list is used to keep ordering stable and sane, as we + * don't have any good way to talk about ordering otherwise + */ + +static int legacy_probe_add(unsigned long port, unsigned int irq, + enum controller type, unsigned long private) +{ + struct legacy_probe *lp = &probe_list[0]; + int i; + struct legacy_probe *free = NULL; + + for (i = 0; i < NR_HOST; i++) { + if (lp->port == 0 && free == NULL) + free = lp; + /* Matching port, or the correct slot for ordering */ + if (lp->port == port || legacy_port[i] == port) { + free = lp; + break; + } + lp++; + } + if (free == NULL) { + printk(KERN_ERR "pata_legacy: Too many interfaces.\n"); + return -1; + } + /* Fill in the entry for later probing */ + free->port = port; + free->irq = irq; + free->type = type; + free->private = private; + return 0; +} + /** * legacy_set_mode - mode setting - * @ap: IDE interface + * @link: IDE link + * @unused: Device that failed when error is returned * * Use a non standard set_mode function. We don't want to be tuned. * @@ -105,37 +213,27 @@ static int pio_mask = 0x1F; /* PIO range for autospeed devices */ * expand on this as per hdparm in the base kernel. */ -static void legacy_set_mode(struct ata_port *ap) +static int legacy_set_mode(struct ata_link *link, struct ata_device **unused) { - int i; - - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev)) { - dev->pio_mode = XFER_PIO_0; - dev->xfer_mode = XFER_PIO_0; - dev->xfer_shift = ATA_SHIFT_PIO; - dev->flags |= ATA_DFLAG_PIO; - } + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + ata_dev_info(dev, "configured for PIO\n"); + dev->pio_mode = XFER_PIO_0; + dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; } + return 0; } static struct scsi_host_template legacy_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), +}; + +static const struct ata_port_operations legacy_base_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, }; /* @@ -147,62 +245,23 @@ static struct scsi_host_template legacy_sht = { */ static struct ata_port_operations simple_port_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer_noirq, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &legacy_base_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, }; static struct ata_port_operations legacy_port_ops = { + 
.inherits = &legacy_base_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer_noirq,
 	.set_mode	= legacy_set_mode,
-
-	.port_disable	= ata_port_disable,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select	= ata_std_dev_select,
-
-	.error_handler	= ata_bmdma_error_handler,
-
-	.qc_prep	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_pio_data_xfer_noirq,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-
-	.port_start	= ata_port_start,
-	.port_stop	= ata_port_stop,
-	.host_stop	= ata_host_stop
 };

 /*
 *	Promise 20230C and 20630 support
 *
- *	This controller supports PIO0 to PIO2. We set PIO timings conservatively to
- *	allow for 50MHz Vesa Local Bus. The 20620 DMA support is weird being DMA to
- *	controller and PIO'd to the host and not supported.
+ *	This controller supports PIO0 to PIO2. We set PIO timings
+ *	conservatively to allow for 50MHz Vesa Local Bus. The 20630 DMA
+ *	support is weird: data is DMA'd to the controller and then PIO'd
+ *	to the host, so it is not supported.
 */

 static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -217,8 +276,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	local_irq_save(flags);

 	/* Unlock the control interface */
-	do
-	{
+	do {
 		inb(0x1F5);
 		outb(inb(0x1F2) | 0x80, 0x1F2);
 		inb(0x1F2);
@@ -227,7 +285,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		inb(0x1F2);
 		inb(0x1F2);
 	}
-	while((inb(0x1F2) & 0x80) && --tries);
+	while ((inb(0x1F2) & 0x80) && --tries);

 	local_irq_restore(flags);
@@ -245,73 +303,59 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
 }

-static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
+static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw)
 {
-	struct ata_port *ap = adev->ap;
 	int slop = buflen & 3;
-	unsigned long flags;
+	struct ata_port *ap = dev->link->ap;
+
+	/* 32bit I/O capable *and* we need to write a whole number of dwords */
+	if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
+					&& (ap->pflags & ATA_PFLAG_PIO32)) {
+		unsigned long flags;

-	if (ata_id_has_dword_io(adev->id)) {
 		local_irq_save(flags);

 		/* Perform the 32bit I/O synchronization sequence */
-		inb(ap->ioaddr.nsect_addr);
-		inb(ap->ioaddr.nsect_addr);
-		inb(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);

 		/* Now the data */
-
-		if (write_data)
-			outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
+		if (rw == READ)
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 		else
-			insl(ap->ioaddr.data_addr, buf, buflen >> 2);
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

 		if (unlikely(slop)) {
-			u32 pad;
-			if (write_data) {
-				memcpy(&pad, buf + buflen - slop, slop);
-				outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
-			} else {
-				pad = cpu_to_le16(inl(ap->ioaddr.data_addr));
+			__le32 pad;
+			if (rw == READ) {
+				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 				memcpy(buf + buflen - slop, &pad, slop);
+			} else {
+				memcpy(&pad, buf + buflen - slop, slop);
+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
 			}
+			buflen += 4 - slop;
 		}
 		local_irq_restore(flags);
-	}
-	else
-		ata_pio_data_xfer_noirq(adev, buf, buflen, write_data);
+	} else
+		buflen = ata_sff_data_xfer_noirq(dev, buf, buflen, rw);
+
+	return buflen;
 }

 static struct ata_port_operations
pdc20230_port_ops = { + .inherits = &legacy_base_port_ops, .set_piomode = pdc20230_set_piomode, - - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = ata_bmdma_error_handler, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = pdc_data_xfer_vlb, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .sff_data_xfer = pdc_data_xfer_vlb, }; /* * Holtek 6560A support * - * This controller supports PIO0 to PIO2 (no IORDY even though higher timings - * can be loaded). + * This controller supports PIO0 to PIO2 (no IORDY even though higher + * timings can be loaded). */ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) @@ -322,48 +366,28 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) /* Get the timing data in cycles. For now play safe at 50Mhz */ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); - active = FIT(t.active, 2, 15); - recover = FIT(t.recover, 4, 15); + active = clamp_val(t.active, 2, 15); + recover = clamp_val(t.recover, 4, 15); inb(0x3E6); inb(0x3E6); inb(0x3E6); inb(0x3E6); - outb(recover << 4 | active, ap->ioaddr.device_addr); - inb(ap->ioaddr.status_addr); + iowrite8(recover << 4 | active, ap->ioaddr.device_addr); + ioread8(ap->ioaddr.status_addr); } static struct ata_port_operations ht6560a_port_ops = { + .inherits = &legacy_base_port_ops, .set_piomode = ht6560a_set_piomode, - - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = ata_bmdma_error_handler, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */ - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /* * Holtek 6560B support * - * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting - * unless we see an ATAPI device in which case we force it off. + * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO + * setting unless we see an ATAPI device in which case we force it off. * * FIXME: need to implement 2nd channel support. */ @@ -376,50 +400,29 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) /* Get the timing data in cycles. 
For now play safe at 50Mhz */ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); - active = FIT(t.active, 2, 15); - recover = FIT(t.recover, 2, 16); - recover &= 0x15; + active = clamp_val(t.active, 2, 15); + recover = clamp_val(t.recover, 2, 16) & 0x0F; inb(0x3E6); inb(0x3E6); inb(0x3E6); inb(0x3E6); - outb(recover << 4 | active, ap->ioaddr.device_addr); + iowrite8(recover << 4 | active, ap->ioaddr.device_addr); if (adev->class != ATA_DEV_ATA) { u8 rconf = inb(0x3E6); if (rconf & 0x24) { - rconf &= ~ 0x24; + rconf &= ~0x24; outb(rconf, 0x3E6); } } - inb(ap->ioaddr.status_addr); + ioread8(ap->ioaddr.status_addr); } static struct ata_port_operations ht6560b_port_ops = { + .inherits = &legacy_base_port_ops, .set_piomode = ht6560b_set_piomode, - - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = ata_bmdma_error_handler, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */ - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /* @@ -452,7 +455,8 @@ static u8 opti_syscfg(u8 reg) * This controller supports PIO0 to PIO3. */ -static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev) +static void opti82c611a_set_piomode(struct ata_port *ap, + struct ata_device *adev) { u8 active, recover, setup; struct ata_timing t; @@ -462,12 +466,12 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev u8 rc; /* Enter configuration mode */ - inw(ap->ioaddr.error_addr); - inw(ap->ioaddr.error_addr); - outb(3, ap->ioaddr.nsect_addr); + ioread16(ap->ioaddr.error_addr); + ioread16(ap->ioaddr.error_addr); + iowrite8(3, ap->ioaddr.nsect_addr); /* Read VLB clock strapping */ - clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03]; + clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03]; /* Get the timing data in cycles */ ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000); @@ -480,64 +484,44 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); } - active = FIT(t.active, 2, 17) - 2; - recover = FIT(t.recover, 1, 16) - 1; - setup = FIT(t.setup, 1, 4) - 1; + active = clamp_val(t.active, 2, 17) - 2; + recover = clamp_val(t.recover, 1, 16) - 1; + setup = clamp_val(t.setup, 1, 4) - 1; /* Select the right timing bank for write timing */ - rc = inb(ap->ioaddr.lbal_addr); + rc = ioread8(ap->ioaddr.lbal_addr); rc &= 0x7F; rc |= (adev->devno << 7); - outb(rc, ap->ioaddr.lbal_addr); + iowrite8(rc, ap->ioaddr.lbal_addr); /* Write the timings */ - outb(active << 4 | recover, ap->ioaddr.error_addr); + iowrite8(active << 4 | recover, ap->ioaddr.error_addr); /* Select the right bank for read timings, also load the shared timings for address */ - rc = inb(ap->ioaddr.device_addr); + rc = ioread8(ap->ioaddr.device_addr); rc &= 0xC0; rc |= adev->devno; /* Index select */ rc |= (setup << 4) | 0x04; - outb(rc, ap->ioaddr.device_addr); + iowrite8(rc, ap->ioaddr.device_addr); /* Load the read timings */ - outb(active << 4 | recover, ap->ioaddr.data_addr); + iowrite8(active << 4 | recover, ap->ioaddr.data_addr); /* Ensure the timing register mode is right */ - rc = inb (ap->ioaddr.lbal_addr); + rc = ioread8(ap->ioaddr.lbal_addr); rc &= 
0x73;
 	rc |= 0x84;
-	outb(rc, ap->ioaddr.lbal_addr);
+	iowrite8(rc, ap->ioaddr.lbal_addr);

 	/* Exit command mode */
-	outb(0x83, ap->ioaddr.nsect_addr);
+	iowrite8(0x83, ap->ioaddr.nsect_addr);
 }

 static struct ata_port_operations opti82c611a_port_ops = {
+	.inherits	= &legacy_base_port_ops,
 	.set_piomode	= opti82c611a_set_piomode,
-
-	.port_disable	= ata_port_disable,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select	= ata_std_dev_select,
-
-	.error_handler	= ata_bmdma_error_handler,
-
-	.qc_prep	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-
-	.data_xfer	= ata_pio_data_xfer,
-
-	.irq_handler	= ata_interrupt,
-	.irq_clear	= ata_bmdma_irq_clear,
-
-	.port_start	= ata_port_start,
-	.port_stop	= ata_port_stop,
-	.host_stop	= ata_host_stop
 };

 /*
@@ -558,12 +542,12 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	u8 sysclk;

 	/* Get the clock */
-	sysclk = opti_syscfg(0xAC) & 0xC0;	/* BIOS set */
+	sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6;	/* BIOS set */

 	/* Enter configuration mode */
-	inw(ap->ioaddr.error_addr);
-	inw(ap->ioaddr.error_addr);
-	outb(3, ap->ioaddr.nsect_addr);
+	ioread16(ap->ioaddr.error_addr);
+	ioread16(ap->ioaddr.error_addr);
+	iowrite8(3, ap->ioaddr.nsect_addr);

 	/* Read VLB clock strapping */
 	clock = 1000000000 / khz[sysclk];
@@ -579,45 +563,45 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
 	}

-	active = FIT(t.active, 2, 17) - 2;
-	recover = FIT(t.recover, 1, 16) - 1;
-	setup = FIT(t.setup, 1, 4) - 1;
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;

 	/* Select the right timing bank for write timing */
-	rc = inb(ap->ioaddr.lbal_addr);
+	rc = ioread8(ap->ioaddr.lbal_addr);
 	rc &= 0x7F;
 	rc |= (adev->devno << 7);
-	outb(rc, ap->ioaddr.lbal_addr);
+	iowrite8(rc, ap->ioaddr.lbal_addr);

 	/* Write the timings */
-	outb(active << 4 | recover, ap->ioaddr.error_addr);
+	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);

 	/* Select the right bank for read timings, also
 	load the shared timings for address */
-	rc = inb(ap->ioaddr.device_addr);
+	rc = ioread8(ap->ioaddr.device_addr);
 	rc &= 0xC0;
 	rc |= adev->devno;	/* Index select */
 	rc |= (setup << 4) | 0x04;
-	outb(rc, ap->ioaddr.device_addr);
+	iowrite8(rc, ap->ioaddr.device_addr);

 	/* Load the read timings */
-	outb(active << 4 | recover, ap->ioaddr.data_addr);
+	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);

 	/* Ensure the timing register mode is right */
-	rc = inb (ap->ioaddr.lbal_addr);
+	rc = ioread8(ap->ioaddr.lbal_addr);
 	rc &= 0x73;
 	rc |= 0x84;
-	outb(rc, ap->ioaddr.lbal_addr);
+	iowrite8(rc, ap->ioaddr.lbal_addr);

 	/* Exit command mode */
-	outb(0x83, ap->ioaddr.nsect_addr);
+	iowrite8(0x83, ap->ioaddr.nsect_addr);

 	/* We need to know this for quad device on the MVB */
 	ap->host->private_data = ap;
 }

 /**
- * opt82c465mv_qc_issue_prot	-	command issue
+ * opti82c46x_qc_issue	-	command issue
 * @qc: command pending
 *
 * Called when the libata layer is about to issue a command.
We wrap @@ -631,7 +615,7 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev) * FIXME: dual channel needs ->serialize support */ -static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; @@ -642,92 +626,272 @@ static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc) && ap->host->private_data != NULL) opti82c46x_set_piomode(ap, adev); - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } static struct ata_port_operations opti82c46x_port_ops = { + .inherits = &legacy_base_port_ops, .set_piomode = opti82c46x_set_piomode, + .qc_issue = opti82c46x_qc_issue, +}; - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = ata_bmdma_error_handler, - - .qc_prep = ata_qc_prep, - .qc_issue = opti82c46x_qc_issue_prot, +/** + * qdi65x0_set_piomode - PIO setup for QDI65x0 + * @ap: Port + * @adev: Device + * + * In single channel mode the 6580 has one clock per device and we can + * avoid the requirement to clock switch. We also have to load the timing + * into the right clock according to whether we are master or slave. + * + * In dual channel mode the 6580 has one clock per channel and we have + * to software clockswitch in qc_issue. + */ - .data_xfer = ata_pio_data_xfer, +static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct ata_timing t; + struct legacy_data *ld_qdi = ap->host->private_data; + int active, recovery; + u8 timing; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + /* Get the timing data in cycles */ + ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); + + if (ld_qdi->fast) { + active = 8 - clamp_val(t.active, 1, 8); + recovery = 18 - clamp_val(t.recover, 3, 18); + } else { + active = 9 - clamp_val(t.active, 2, 9); + recovery = 15 - clamp_val(t.recover, 0, 15); + } + timing = (recovery << 4) | active | 0x08; + ld_qdi->clock[adev->devno] = timing; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; + if (ld_qdi->type == QDI6580) + outb(timing, ld_qdi->timing + 2 * adev->devno); + else + outb(timing, ld_qdi->timing + 2 * ap->port_no); + /* Clear the FIFO */ + if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA) + outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3); +} /** - * legacy_init_one - attach a legacy interface - * @port: port number - * @io: I/O port start - * @ctrl: control port - * @irq: interrupt line + * qdi_qc_issue - command issue + * @qc: command pending * - * Register an ISA bus IDE interface. Such interfaces are PIO and we - * assume do not support IRQ sharing. + * Called when the libata layer is about to issue a command. We wrap + * this interface so that we can load the correct ATA timings. 
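+ *
+ * The clock byte cached per device by qdi65x0_set_piomode() is
+ * compared with the value last written; the timing register is only
+ * reloaded when the two differ, so repeated commands to the same
+ * device skip the extra configuration I/O.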
*/ -static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq) +static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc) { - struct legacy_data *ld = &legacy_data[nr_legacy_host]; - struct ata_probe_ent ae; - struct platform_device *pdev; - int ret = -EBUSY; - struct ata_port_operations *ops = &legacy_port_ops; - int pio_modes = pio_mask; - u32 mask = (1 << port); + struct ata_port *ap = qc->ap; + struct ata_device *adev = qc->dev; + struct legacy_data *ld_qdi = ap->host->private_data; + + if (ld_qdi->clock[adev->devno] != ld_qdi->last) { + if (adev->pio_mode) { + ld_qdi->last = ld_qdi->clock[adev->devno]; + outb(ld_qdi->clock[adev->devno], ld_qdi->timing + + 2 * ap->port_no); + } + } + return ata_sff_qc_issue(qc); +} + +static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = adev->link->ap; + int slop = buflen & 3; + + if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3) + && (ap->pflags & ATA_PFLAG_PIO32)) { + if (rw == WRITE) + iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); + else + ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); + + if (unlikely(slop)) { + __le32 pad; + if (rw == WRITE) { + memcpy(&pad, buf + buflen - slop, slop); + iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); + } else { + pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); + memcpy(buf + buflen - slop, &pad, slop); + } + } + return (buflen + 3) & ~3; + } else + return ata_sff_data_xfer(adev, buf, buflen, rw); +} - if (request_region(io, 8, "pata_legacy") == NULL) +static int qdi_port(struct platform_device *dev, + struct legacy_probe *lp, struct legacy_data *ld) +{ + if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL) return -EBUSY; - if (request_region(ctrl, 1, "pata_legacy") == NULL) - goto fail_io; + ld->timing = lp->private; + return 0; +} - pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0); - if (pdev == NULL) - goto fail_dev; +static struct ata_port_operations qdi6500_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi65x0_set_piomode, + .qc_issue = qdi_qc_issue, + .sff_data_xfer = vlb32_data_xfer, +}; - if (ht6560a & mask) { - ops = &ht6560a_port_ops; - pio_modes = 0x07; - } - if (ht6560b & mask) { - ops = &ht6560b_port_ops; - pio_modes = 0x1F; - } - if (opti82c611a & mask) { - ops = &opti82c611a_port_ops; - pio_modes = 0x0F; - } - if (opti82c46x & mask) { - ops = &opti82c46x_port_ops; - pio_modes = 0x0F; - } +static struct ata_port_operations qdi6580_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi65x0_set_piomode, + .sff_data_xfer = vlb32_data_xfer, +}; - /* Probe for automatically detectable controllers */ +static struct ata_port_operations qdi6580dp_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi65x0_set_piomode, + .qc_issue = qdi_qc_issue, + .sff_data_xfer = vlb32_data_xfer, +}; - if (io == 0x1F0 && ops == &legacy_port_ops) { - unsigned long flags; +static DEFINE_SPINLOCK(winbond_lock); - local_irq_save(flags); +static void winbond_writecfg(unsigned long port, u8 reg, u8 val) +{ + unsigned long flags; + spin_lock_irqsave(&winbond_lock, flags); + outb(reg, port + 0x01); + outb(val, port + 0x02); + spin_unlock_irqrestore(&winbond_lock, flags); +} + +static u8 winbond_readcfg(unsigned long port, u8 reg) +{ + u8 val; + + unsigned long flags; + spin_lock_irqsave(&winbond_lock, flags); + outb(reg, port + 0x01); + val = inb(port + 0x02); + 
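+	/* Reads use the same indexed pair as winbond_writecfg(): the
+	   register number goes to base + 1, the data to base + 2 */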
spin_unlock_irqrestore(&winbond_lock, flags); + + return val; +} + +static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + struct ata_timing t; + struct legacy_data *ld_winbond = ap->host->private_data; + int active, recovery; + u8 reg; + int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2); + + reg = winbond_readcfg(ld_winbond->timing, 0x81); + + /* Get the timing data in cycles */ + if (reg & 0x40) /* Fast VLB bus, assume 50MHz */ + ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); + else + ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); + + active = (clamp_val(t.active, 3, 17) - 1) & 0x0F; + recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F; + timing = (active << 4) | recovery; + winbond_writecfg(ld_winbond->timing, timing, reg); + + /* Load the setup timing */ + + reg = 0x35; + if (adev->class != ATA_DEV_ATA) + reg |= 0x08; /* FIFO off */ + if (!ata_pio_need_iordy(adev)) + reg |= 0x02; /* IORDY off */ + reg |= (clamp_val(t.setup, 0, 3) << 6); + winbond_writecfg(ld_winbond->timing, timing + 1, reg); +} + +static int winbond_port(struct platform_device *dev, + struct legacy_probe *lp, struct legacy_data *ld) +{ + if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL) + return -EBUSY; + ld->timing = lp->private; + return 0; +} +static struct ata_port_operations winbond_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = winbond_set_piomode, + .sff_data_xfer = vlb32_data_xfer, +}; + +static struct legacy_controller controllers[] = { + {"BIOS", &legacy_port_ops, ATA_PIO4, + ATA_FLAG_NO_IORDY, 0, NULL }, + {"Snooping", &simple_port_ops, ATA_PIO4, + 0, 0, NULL }, + {"PDC20230", &pdc20230_port_ops, ATA_PIO2, + ATA_FLAG_NO_IORDY, + ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, NULL }, + {"HT6560A", &ht6560a_port_ops, ATA_PIO2, + ATA_FLAG_NO_IORDY, 0, NULL }, + {"HT6560B", &ht6560b_port_ops, ATA_PIO4, + ATA_FLAG_NO_IORDY, 0, NULL }, + {"OPTI82C611A", &opti82c611a_port_ops, ATA_PIO3, + 0, 0, NULL }, + {"OPTI82C46X", &opti82c46x_port_ops, ATA_PIO3, + 0, 0, NULL }, + {"QDI6500", &qdi6500_port_ops, ATA_PIO2, + ATA_FLAG_NO_IORDY, + ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, + {"QDI6580", &qdi6580_port_ops, ATA_PIO4, + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, + {"QDI6580DP", &qdi6580dp_port_ops, ATA_PIO4, + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port }, + {"W83759A", &winbond_port_ops, ATA_PIO4, + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, + winbond_port } +}; + +/** + * probe_chip_type - Discover controller + * @probe: Probe entry to check + * + * Probe an ATA port and identify the type of controller. We don't + * check if the controller appears to be driveless at this point. 
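+ *
+ * Detection order: a Winbond 83759A claiming the port wins, then the
+ * PDC20230 bus-float probe on 0x1F0, then any type forced by module
+ * parameters, with the autospeed snoop and plain BIOS types as
+ * fallbacks.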
+ */

+static __init int probe_chip_type(struct legacy_probe *probe)
+{
+	int mask = 1 << probe->slot;
+
+	if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
+		u8 reg = winbond_readcfg(winbond, 0x81);
+		reg |= 0x80;	/* jumpered mode off */
+		winbond_writecfg(winbond, 0x81, reg);
+		reg = winbond_readcfg(winbond, 0x83);
+		reg |= 0xF0;	/* local control */
+		winbond_writecfg(winbond, 0x83, reg);
+		reg = winbond_readcfg(winbond, 0x85);
+		reg |= 0xF0;	/* programmable timing */
+		winbond_writecfg(winbond, 0x85, reg);
+
+		reg = winbond_readcfg(winbond, 0x81);
+
+		if (reg & mask)
+			return W83759A;
+	}
+	if (probe->port == 0x1F0) {
+		unsigned long flags;
+		local_irq_save(flags);
 		/* Probes */
-		inb(0x1F5);
 		outb(inb(0x1F2) | 0x80, 0x1F2);
+		inb(0x1F5);
 		inb(0x1F2);
 		inb(0x3F6);
 		inb(0x3F6);
@@ -736,57 +900,120 @@

 		if ((inb(0x1F2) & 0x80) == 0) {
 			/* PDC20230c or 20630 ? */
-			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
-			pio_modes = 0x07;
-			ops = &pdc20230_port_ops;
+			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller"
+							" detected.\n");
 			udelay(100);
 			inb(0x1F5);
+			local_irq_restore(flags);
+			return PDC20230;
 		} else {
 			outb(0x55, 0x1F2);
 			inb(0x1F2);
 			inb(0x1F2);
-			if (inb(0x1F2) == 0x00) {
-				printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
-			}
+			if (inb(0x1F2) == 0x00)
+				printk(KERN_INFO "PDC20230-B VLB ATA "
						"controller detected.\n");
+			local_irq_restore(flags);
+			return BIOS;
 		}
-		local_irq_restore(flags);
 	}
+	if (ht6560a & mask)
+		return HT6560A;
+	if (ht6560b & mask)
+		return HT6560B;
+	if (opti82c611a & mask)
+		return OPTI611A;
+	if (opti82c46x & mask)
+		return OPTI46X;
+	if (autospeed & mask)
+		return SNOOP;
+	return BIOS;
+}
+
+
+/**
+ *	legacy_init_one		-	attach a legacy interface
+ *	@probe: probe record
+ *
+ *	Register an ISA bus IDE interface. Such interfaces are PIO and we
+ *	assume they do not support IRQ sharing.
+ */

-	/* Chip does mode setting by command snooping */
-	if (ops == &legacy_port_ops && (autospeed & mask))
-		ops = &simple_port_ops;
-
-	memset(&ae, 0, sizeof(struct ata_probe_ent));
-	INIT_LIST_HEAD(&ae.node);
-	ae.dev = &pdev->dev;
-	ae.port_ops = ops;
-	ae.sht = &legacy_sht;
-	ae.n_ports = 1;
-	ae.pio_mask = pio_modes;
-	ae.irq = irq;
-	ae.irq_flags = 0;
-	ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
-	ae.port[0].cmd_addr = io;
-	ae.port[0].altstatus_addr = ctrl;
-	ae.port[0].ctl_addr = ctrl;
-	ata_std_ports(&ae.port[0]);
-	ae.private_data = ld;
-
-	ret = ata_device_add(&ae);
-	if (ret == 0) {
-		ret = -ENODEV;
+static __init int legacy_init_one(struct legacy_probe *probe)
+{
+	struct legacy_controller *controller = &controllers[probe->type];
+	int pio_modes = controller->pio_mask;
+	unsigned long io = probe->port;
+	u32 mask = (1 << probe->slot);
+	struct ata_port_operations *ops = controller->ops;
+	struct legacy_data *ld = &legacy_data[probe->slot];
+	struct ata_host *host = NULL;
+	struct ata_port *ap;
+	struct platform_device *pdev;
+	struct ata_device *dev;
+	void __iomem *io_addr, *ctrl_addr;
+	u32 iordy = (iordy_mask & mask) ? 0 : ATA_FLAG_NO_IORDY;
+	int ret;
+
+	iordy |= controller->flags;
+
+	pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	ret = -EBUSY;
+	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
+	    devm_request_region(&pdev->dev, io + 0x0206, 1,
+							"pata_legacy") == NULL)
 		goto fail;
-	}
-	legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
+
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io, 8);
+	ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
+	if (!io_addr || !ctrl_addr)
+		goto fail;
+	ld->type = probe->type;
+	if (controller->setup)
+		if (controller->setup(pdev, probe, ld) < 0)
+			goto fail;
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto fail;
+	ap = host->ports[0];
+
+	ap->ops = ops;
+	ap->pio_mask = pio_modes;
+	ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
+	ap->pflags |= controller->pflags;
+	ap->ioaddr.cmd_addr = io_addr;
+	ap->ioaddr.altstatus_addr = ctrl_addr;
+	ap->ioaddr.ctl_addr = ctrl_addr;
+	ata_sff_std_ports(&ap->ioaddr);
+	ap->host->private_data = ld;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
+
+	ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
+				&legacy_sht);
+	if (ret)
+		goto fail;
+	async_synchronize_full();
 	ld->platform_dev = pdev;
-	return 0;
+
+	/* Nothing found means we drop the port as it's probably not there */
+
+	ret = -ENODEV;
+	ata_for_each_dev(dev, &ap->link, ALL) {
+		if (!ata_dev_absent(dev)) {
+			legacy_host[probe->slot] = host;
+			ld->platform_dev = pdev;
+			return 0;
+		}
+	}
+	ata_host_detach(host);
 fail:
 	platform_device_unregister(pdev);
-fail_dev:
-	release_region(ctrl, 1);
-fail_io:
-	release_region(io, 8);
 	return ret;
 }

@@ -796,13 +1023,15 @@ fail_io:
 * @primary: set this if we find an ATA primary
 * @secondary: set this if we find an ATA secondary
 *
- * A small number of vendors implemented early PCI ATA interfaces on bridge logic
- * without the ATA interface being PCI visible. Where we have a matching PCI driver
- * we must skip the relevant device here. If we don't know about it then the legacy
- * driver is the right driver anyway.
+ * A small number of vendors implemented early PCI ATA interfaces
+ * on bridge logic without the ATA interface being PCI visible.
+ * Where we have a matching PCI driver we must skip the relevant
+ * device here. If we don't know about it then the legacy driver
+ * is the right driver anyway.
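+ *
+ * Two such cases are handled below: the Cyrix CS5510 bridge ATA and
+ * an early Intel device (8086:1234) that reports its enabled ports
+ * in config word 0x6C.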
*/ -static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary) +static void __init legacy_check_special_cases(struct pci_dev *p, int *primary, + int *secondary) { /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */ if (p->vendor == 0x1078 && p->device == 0x0000) { @@ -818,7 +1047,8 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec if (p->vendor == 0x8086 && p->device == 0x1234) { u16 r; pci_read_config_word(p, 0x6C, &r); - if (r & 0x8000) { /* ATA port enabled */ + if (r & 0x8000) { + /* ATA port enabled */ if (r & 0x4000) *secondary = 1; else @@ -828,6 +1058,114 @@ static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *sec } } +static __init void probe_opti_vlb(void) +{ + /* If an OPTI 82C46X is present find out where the channels are */ + static const char *optis[4] = { + "3/463MV", "5MV", + "5MVA", "5MVB" + }; + u8 chans = 1; + u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6; + + opti82c46x = 3; /* Assume master and slave first */ + printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", + optis[ctrl]); + if (ctrl == 3) + chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1; + ctrl = opti_syscfg(0xAC); + /* Check enabled and this port is the 465MV port. On the + MVB we may have two channels */ + if (ctrl & 8) { + if (chans == 2) { + legacy_probe_add(0x1F0, 14, OPTI46X, 0); + legacy_probe_add(0x170, 15, OPTI46X, 0); + } + if (ctrl & 4) + legacy_probe_add(0x170, 15, OPTI46X, 0); + else + legacy_probe_add(0x1F0, 14, OPTI46X, 0); + } else + legacy_probe_add(0x1F0, 14, OPTI46X, 0); +} + +static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port) +{ + static const unsigned long ide_port[2] = { 0x170, 0x1F0 }; + /* Check card type */ + if ((r & 0xF0) == 0xC0) { + /* QD6500: single channel */ + if (r & 8) + /* Disabled ? */ + return; + legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01), + QDI6500, port); + } + if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) { + /* QD6580: dual channel */ + if (!request_region(port + 2 , 2, "pata_qdi")) { + release_region(port, 2); + return; + } + res = inb(port + 3); + /* Single channel mode ? 
*/ + if (res & 1) + legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01), + QDI6580, port); + else { /* Dual channel mode */ + legacy_probe_add(0x1F0, 14, QDI6580DP, port); + /* port + 0x02, r & 0x04 */ + legacy_probe_add(0x170, 15, QDI6580DP, port + 2); + } + release_region(port + 2, 2); + } +} + +static __init void probe_qdi_vlb(void) +{ + unsigned long flags; + static const unsigned long qd_port[2] = { 0x30, 0xB0 }; + int i; + + /* + * Check each possible QD65xx base address + */ + + for (i = 0; i < 2; i++) { + unsigned long port = qd_port[i]; + u8 r, res; + + + if (request_region(port, 2, "pata_qdi")) { + /* Check for a card */ + local_irq_save(flags); + /* I have no h/w that needs this delay but it + is present in the historic code */ + r = inb(port); + udelay(1); + outb(0x19, port); + udelay(1); + res = inb(port); + udelay(1); + outb(r, port); + udelay(1); + local_irq_restore(flags); + + /* Fail */ + if (res == 0x19) { + release_region(port, 2); + continue; + } + /* Passes the presence test */ + r = inb(port + 1); + udelay(1); + /* Check port agrees with port set */ + if ((r & 2) >> 1 == i) + qdi65_identify_port(r, res, port); + release_region(port, 2); + } + } +} /** * legacy_init - attach legacy interfaces @@ -845,15 +1183,17 @@ static __init int legacy_init(void) int ct = 0; int primary = 0; int secondary = 0; - int last_port = NR_HOST; + int pci_present = 0; + struct legacy_probe *pl = &probe_list[0]; + int slot = 0; struct pci_dev *p = NULL; for_each_pci_dev(p) { int r; - /* Check for any overlap of the system ATA mappings. Native mode controllers - stuck on these addresses or some devices in 'raid' mode won't be found by - the storage class test */ + /* Check for any overlap of the system ATA mappings. Native + mode controllers stuck on these addresses or some devices + in 'raid' mode won't be found by the storage class test */ for (r = 0; r < 6; r++) { if (pci_resource_start(p, r) == 0x1f0) primary = 1; @@ -863,49 +1203,39 @@ static __init int legacy_init(void) /* Check for special cases */ legacy_check_special_cases(p, &primary, &secondary); - /* If PCI bus is present then don't probe for tertiary legacy ports */ - if (probe_all == 0) - last_port = 2; + /* If PCI bus is present then don't probe for tertiary + legacy ports */ + pci_present = 1; } - /* If an OPTI 82C46X is present find out where the channels are */ - if (opti82c46x) { - static const char *optis[4] = { - "3/463MV", "5MV", - "5MVA", "5MVB" - }; - u8 chans = 1; - u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6; - - opti82c46x = 3; /* Assume master and slave first */ - printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]); - if (ctrl == 3) - chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1; - ctrl = opti_syscfg(0xAC); - /* Check enabled and this port is the 465MV port. 
On the - MVB we may have two channels */ - if (ctrl & 8) { - if (ctrl & 4) - opti82c46x = 2; /* Slave */ - else - opti82c46x = 1; /* Master */ - if (chans == 2) - opti82c46x = 3; /* Master and Slave */ - } /* Slave only */ - else if (chans == 1) - opti82c46x = 1; + if (winbond == 1) + winbond = 0x130; /* Default port, alt is 1B0 */ + + if (primary == 0 || all) + legacy_probe_add(0x1F0, 14, UNKNOWN, 0); + if (secondary == 0 || all) + legacy_probe_add(0x170, 15, UNKNOWN, 0); + + if (probe_all || !pci_present) { + /* ISA/VLB extra ports */ + legacy_probe_add(0x1E8, 11, UNKNOWN, 0); + legacy_probe_add(0x168, 10, UNKNOWN, 0); + legacy_probe_add(0x1E0, 8, UNKNOWN, 0); + legacy_probe_add(0x160, 12, UNKNOWN, 0); } - for (i = 0; i < last_port; i++) { - /* Skip primary if we have seen a PCI one */ - if (i == 0 && primary == 1) - continue; - /* Skip secondary if we have seen a PCI one */ - if (i == 1 && secondary == 1) + if (opti82c46x) + probe_opti_vlb(); + if (qdi) + probe_qdi_vlb(); + + for (i = 0; i < NR_HOST; i++, pl++) { + if (pl->port == 0) continue; - if (legacy_init_one(i, legacy_port[i], - legacy_port[i] + 0x0206, - legacy_irq[i]) == 0) + if (pl->type == UNKNOWN) + pl->type = probe_chip_type(pl); + pl->slot = slot++; + if (legacy_init_one(pl) == 0) ct++; } if (ct != 0) @@ -919,15 +1249,8 @@ static __exit void legacy_exit(void) for (i = 0; i < nr_legacy_host; i++) { struct legacy_data *ld = &legacy_data[i]; - struct ata_port *ap =legacy_host[i]->ports[0]; - unsigned long io = ap->ioaddr.cmd_addr; - unsigned long ctrl = ap->ioaddr.ctl_addr; - ata_host_remove(legacy_host[i]); + ata_host_detach(legacy_host[i]); platform_device_unregister(ld->platform_dev); - if (ld->timing) - release_region(ld->timing, 2); - release_region(io, 8); - release_region(ctrl, 1); } } @@ -935,6 +1258,8 @@ MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for legacy ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("pata_qdi"); +MODULE_ALIAS("pata_winbond"); module_param(probe_all, int, 0); module_param(autospeed, int, 0); @@ -942,8 +1267,10 @@ module_param(ht6560a, int, 0); module_param(ht6560b, int, 0); module_param(opti82c611a, int, 0); module_param(opti82c46x, int, 0); +module_param(qdi, int, 0); +module_param(winbond, int, 0); module_param(pio_mask, int, 0); +module_param(iordy_mask, int, 0); module_init(legacy_init); module_exit(legacy_exit); - diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c new file mode 100644 index 00000000000..a02f76fdcfc --- /dev/null +++ b/drivers/ata/pata_macio.c @@ -0,0 +1,1421 @@ +/* + * Libata based driver for Apple "macio" family of PATA controllers + * + * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp + * <benh@kernel.crashing.org> + * + * Some bits and pieces from drivers/ide/ppc/pmac.c + * + */ + +#undef DEBUG +#undef DEBUG_DMA + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/adb.h> +#include <linux/pmu.h> +#include <linux/scatterlist.h> +#include <linux/of.h> +#include <linux/gfp.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> + +#include <asm/macio.h> +#include <asm/io.h> +#include <asm/dbdma.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <asm/pmac_feature.h> +#include <asm/mediabay.h> + +#ifdef DEBUG_DMA +#define dev_dbgdma(dev, format, arg...) 
\
+	dev_printk(KERN_DEBUG , dev , format , ## arg)
+#else
+#define dev_dbgdma(dev, format, arg...)	\
+	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#endif
+
+#define DRV_NAME	"pata_macio"
+#define DRV_VERSION	"0.9"
+
+/* Models of macio ATA controller */
+enum {
+	controller_ohare,	/* OHare based */
+	controller_heathrow,	/* Heathrow/Paddington */
+	controller_kl_ata3,	/* KeyLargo ATA-3 */
+	controller_kl_ata4,	/* KeyLargo ATA-4 */
+	controller_un_ata6,	/* UniNorth2 ATA-6 */
+	controller_k2_ata6,	/* K2 ATA-6 */
+	controller_sh_ata6,	/* Shasta ATA-6 */
+};
+
+static const char* macio_ata_names[] = {
+	"OHare ATA",		/* OHare based */
+	"Heathrow ATA",		/* Heathrow/Paddington */
+	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
+	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
+	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
+	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
+	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
+};
+
+/*
+ * Extra registers, both 32-bit little-endian
+ */
+#define IDE_TIMING_CONFIG	0x200
+#define IDE_INTERRUPT		0x300
+
+/* Kauai (U2) ATA has different register setup */
+#define IDE_KAUAI_PIO_CONFIG	0x200
+#define IDE_KAUAI_ULTRA_CONFIG	0x210
+#define IDE_KAUAI_POLL_CONFIG	0x220
+
+/*
+ * Timing configuration register definitions
+ */
+
+/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
+#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
+#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
+#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
+#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
+
+/* 133Mhz cell, found in shasta.
+ * See comments about 100 Mhz Uninorth 2...
+ * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
+ * weird and I don't know why ... at this stage
+ */
+#define TR_133_PIOREG_PIO_MASK		0xff000fff
+#define TR_133_PIOREG_MDMA_MASK		0x00fff800
+#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
+#define TR_133_UDMAREG_UDMA_EN		0x00000001
+
+/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
+ * (106b/0033) on uninorth or K2 internal PCI bus and its clock is
+ * controlled like gem or fw. It appears to be an evolution of keylargo
+ * ATA4 with a timing register extended to 2x32bits registers (one
+ * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
+ * It has its own local feature control register as well.
+ *
+ * After scratching my mind over the timing values, at least for PIO
+ * and MDMA, I think I've figured out the format of the timing register,
+ * though I use pre-calculated tables for UDMA as usual...
+ */
+#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
+#define TR_100_PIO_ADDRSETUP_SHIFT	24
+#define TR_100_MDMA_MASK		0x00fff000
+#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
+#define TR_100_MDMA_RECOVERY_SHIFT	18
+#define TR_100_MDMA_ACCESS_MASK		0x0003f000
+#define TR_100_MDMA_ACCESS_SHIFT	12
+#define TR_100_PIO_MASK			0xff000fff
+#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
+#define TR_100_PIO_RECOVERY_SHIFT	6
+#define TR_100_PIO_ACCESS_MASK		0x0000003f
+#define TR_100_PIO_ACCESS_SHIFT		0
+
+#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
+#define TR_100_UDMAREG_UDMA_EN		0x00000001
+
+
+/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
+ * a 40 conductor cable and up to 4 on an 80 conductor one.
+ * Clock unit is 15ns (66Mhz).
+ *
+ * 3 values can be programmed:
+ *   - Write data setup, which appears to match the cycle time. They
+ *     also call it DIOW setup.
+ *   - Ready to pause time (from spec)
+ *   - Address setup. That one is weird. I don't see where exactly
+ *     it fits in UDMA cycles, I got its name from an obscure piece
+ *     of commented out code in Darwin. They leave it to 0, we do as
+ *     well, despite a comment that would lead one to think it has a
+ *     min value of 45ns.
+ * Apple also adds 60ns to the write data setup (or cycle time?) on
+ * reads.
+ */
+#define TR_66_UDMA_MASK			0xfff00000
+#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
+#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
+#define TR_66_PIO_ADDRSETUP_SHIFT	29
+#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
+#define TR_66_UDMA_RDY2PAUS_SHIFT	25
+#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
+#define TR_66_UDMA_WRDATASETUP_SHIFT	21
+#define TR_66_MDMA_MASK			0x000ffc00
+#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
+#define TR_66_MDMA_RECOVERY_SHIFT	15
+#define TR_66_MDMA_ACCESS_MASK		0x00007c00
+#define TR_66_MDMA_ACCESS_SHIFT		10
+#define TR_66_PIO_MASK			0xe00003ff
+#define TR_66_PIO_RECOVERY_MASK		0x000003e0
+#define TR_66_PIO_RECOVERY_SHIFT	5
+#define TR_66_PIO_ACCESS_MASK		0x0000001f
+#define TR_66_PIO_ACCESS_SHIFT		0
+
+/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
+ * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
+ *
+ * The access time and recovery time can be programmed. Some older
+ * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
+ * the same here for safety against broken old hardware ;)
+ * The HalfTick bit, when set, adds half a clock (15ns) to the access
+ * time and removes one from recovery. It's not supported on the
+ * KeyLargo implementation afaik. The E bit appears to be set for PIO
+ * mode 0 and is used to reach long timings used in this mode.
+ */
+#define TR_33_MDMA_MASK			0x003ff800
+#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
+#define TR_33_MDMA_RECOVERY_SHIFT	16
+#define TR_33_MDMA_ACCESS_MASK		0x0000f800
+#define TR_33_MDMA_ACCESS_SHIFT		11
+#define TR_33_MDMA_HALFTICK		0x00200000
+#define TR_33_PIO_MASK			0x000007ff
+#define TR_33_PIO_E			0x00000400
+#define TR_33_PIO_RECOVERY_MASK		0x000003e0
+#define TR_33_PIO_RECOVERY_SHIFT	5
+#define TR_33_PIO_ACCESS_MASK		0x0000001f
+#define TR_33_PIO_ACCESS_SHIFT		0
+
+/*
+ * Interrupt register definitions. Only present on newer cells
+ * (Keylargo and later afaik) so we don't use it.
+ */
+#define IDE_INTR_DMA			0x80000000
+#define IDE_INTR_DEVICE			0x40000000
+
+/*
+ * FCR Register on Kauai. Not sure what bit 0x4 is ...
+ */
+#define KAUAI_FCR_UATA_MAGIC		0x00000004
+#define KAUAI_FCR_UATA_RESET_N		0x00000002
+#define KAUAI_FCR_UATA_ENABLE		0x00000001
+
+
+/* Allow up to 256 DBDMA commands per xfer */
+#define MAX_DCMDS		256
+
+/* Don't let a DMA segment go all the way to 64K */
+#define MAX_DBDMA_SEG		0xff00
+
+
+/*
+ * Wait 1s for disk to answer on IDE bus after a hard reset
+ * of the device (via GPIO/FCR).
+ *
+ * Some devices seem to "pollute" the bus even after dropping
+ * the BSY bit (typically some combo drives slave on the UDMA
+ * bus) after a hard reset. Since we hard reset all drives on
+ * KeyLargo ATA66, we have to keep that delay around. I may end
+ * up not hard resetting anymore on these and keep the delay only
+ * for older interfaces instead (we have to reset when coming
+ * from MacOS...) --BenH.
+ */ +#define IDE_WAKEUP_DELAY_MS 1000 + +struct pata_macio_timing; + +struct pata_macio_priv { + int kind; + int aapl_bus_id; + int mediabay : 1; + struct device_node *node; + struct macio_dev *mdev; + struct pci_dev *pdev; + struct device *dev; + int irq; + u32 treg[2][2]; + void __iomem *tfregs; + void __iomem *kauai_fcr; + struct dbdma_cmd * dma_table_cpu; + dma_addr_t dma_table_dma; + struct ata_host *host; + const struct pata_macio_timing *timings; +}; + +/* Previous variants of this driver used to calculate timings + * for various variants of the chip and use tables for others. + * + * Not only was this confusing, but in addition, it isn't clear + * whether our calculation code was correct. It didn't entirely + * match the darwin code and whatever documentation I could find + * on these cells + * + * I decided to entirely rely on a table instead for this version + * of the driver. Also, because I don't really care about derated + * modes and really old HW other than making it work, I'm not going + * to calculate / snoop timing values for something else than the + * standard modes. + */ +struct pata_macio_timing { + int mode; + u32 reg1; /* Bits to set in first timing reg */ + u32 reg2; /* Bits to set in second timing reg */ +}; + +static const struct pata_macio_timing pata_macio_ohare_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00074000, 0, }, + { XFER_MW_DMA_1, 0x00221000, 0, }, + { XFER_MW_DMA_2, 0x00211000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_heathrow_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00074000, 0, }, + { XFER_MW_DMA_1, 0x00221000, 0, }, + { XFER_MW_DMA_2, 0x00211000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kl33_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00084000, 0, }, + { XFER_MW_DMA_1, 0x00021800, 0, }, + { XFER_MW_DMA_2, 0x00011800, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kl66_timings[] = { + { XFER_PIO_0, 0x0000038c, 0, }, + { XFER_PIO_1, 0x0000020a, 0, }, + { XFER_PIO_2, 0x00000127, 0, }, + { XFER_PIO_3, 0x000000c6, 0, }, + { XFER_PIO_4, 0x00000065, 0, }, + { XFER_MW_DMA_0, 0x00084000, 0, }, + { XFER_MW_DMA_1, 0x00029800, 0, }, + { XFER_MW_DMA_2, 0x00019400, 0, }, + { XFER_UDMA_0, 0x19100000, 0, }, + { XFER_UDMA_1, 0x14d00000, 0, }, + { XFER_UDMA_2, 0x10900000, 0, }, + { XFER_UDMA_3, 0x0c700000, 0, }, + { XFER_UDMA_4, 0x0c500000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kauai_timings[] = { + { XFER_PIO_0, 0x08000a92, 0, }, + { XFER_PIO_1, 0x0800060f, 0, }, + { XFER_PIO_2, 0x0800038b, 0, }, + { XFER_PIO_3, 0x05000249, 0, }, + { XFER_PIO_4, 0x04000148, 0, }, + { XFER_MW_DMA_0, 0x00618000, 0, }, + { XFER_MW_DMA_1, 0x00209000, 0, }, + { XFER_MW_DMA_2, 0x00148000, 0, }, + { XFER_UDMA_0, 0, 0x000070c1, }, + { XFER_UDMA_1, 0, 0x00005d81, }, + { XFER_UDMA_2, 0, 0x00004a61, }, + { XFER_UDMA_3, 0, 0x00003a51, }, + { XFER_UDMA_4, 0, 0x00002a31, }, + { XFER_UDMA_5, 0, 0x00002921, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing 
pata_macio_shasta_timings[] = { + { XFER_PIO_0, 0x0a000c97, 0, }, + { XFER_PIO_1, 0x07000712, 0, }, + { XFER_PIO_2, 0x040003cd, 0, }, + { XFER_PIO_3, 0x0500028b, 0, }, + { XFER_PIO_4, 0x0400010a, 0, }, + { XFER_MW_DMA_0, 0x00820800, 0, }, + { XFER_MW_DMA_1, 0x0028b000, 0, }, + { XFER_MW_DMA_2, 0x001ca000, 0, }, + { XFER_UDMA_0, 0, 0x00035901, }, + { XFER_UDMA_1, 0, 0x000348b1, }, + { XFER_UDMA_2, 0, 0x00033881, }, + { XFER_UDMA_3, 0, 0x00033861, }, + { XFER_UDMA_4, 0, 0x00033841, }, + { XFER_UDMA_5, 0, 0x00033031, }, + { XFER_UDMA_6, 0, 0x00033021, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing *pata_macio_find_timing( + struct pata_macio_priv *priv, + int mode) +{ + int i; + + for (i = 0; priv->timings[i].mode > 0; i++) { + if (priv->timings[i].mode == mode) + return &priv->timings[i]; + } + return NULL; +} + + +static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device) +{ + struct pata_macio_priv *priv = ap->private_data; + void __iomem *rbase = ap->ioaddr.cmd_addr; + + if (priv->kind == controller_sh_ata6 || + priv->kind == controller_un_ata6 || + priv->kind == controller_k2_ata6) { + writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG); + writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG); + } else + writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG); +} + +static void pata_macio_dev_select(struct ata_port *ap, unsigned int device) +{ + ata_sff_dev_select(ap, device); + + /* Apply timings */ + pata_macio_apply_timings(ap, device); +} + +static void pata_macio_set_timings(struct ata_port *ap, + struct ata_device *adev) +{ + struct pata_macio_priv *priv = ap->private_data; + const struct pata_macio_timing *t; + + dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n", + adev->devno, + adev->pio_mode, + ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)), + adev->dma_mode, + ata_mode_string(ata_xfer_mode2mask(adev->dma_mode))); + + /* First clear timings */ + priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0; + + /* Now get the PIO timings */ + t = pata_macio_find_timing(priv, adev->pio_mode); + if (t == NULL) { + dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n", + adev->pio_mode); + t = pata_macio_find_timing(priv, XFER_PIO_0); + } + BUG_ON(t == NULL); + + /* PIO timings only ever use the first treg */ + priv->treg[adev->devno][0] |= t->reg1; + + /* Now get DMA timings */ + t = pata_macio_find_timing(priv, adev->dma_mode); + if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) { + dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n"); + t = pata_macio_find_timing(priv, XFER_MW_DMA_0); + } + BUG_ON(t == NULL); + + /* DMA timings can use both tregs */ + priv->treg[adev->devno][0] |= t->reg1; + priv->treg[adev->devno][1] |= t->reg2; + + dev_dbg(priv->dev, " -> %08x %08x\n", + priv->treg[adev->devno][0], + priv->treg[adev->devno][1]); + + /* Apply to hardware */ + pata_macio_apply_timings(ap, adev->devno); +} + +/* + * Blast some well known "safe" values to the timing registers at init or + * wakeup from sleep time, before we do real calculation + */ +static void pata_macio_default_timings(struct pata_macio_priv *priv) +{ + unsigned int value, value2 = 0; + + switch(priv->kind) { + case controller_sh_ata6: + value = 0x0a820c97; + value2 = 0x00033031; + break; + case controller_un_ata6: + case controller_k2_ata6: + value = 0x08618a92; + value2 = 0x00002921; + break; + case controller_kl_ata4: + value = 0x0008438c; + break; + case controller_kl_ata3: + value = 0x00084526; + break; + case 
controller_heathrow:
+	case controller_ohare:
+	default:
+		value = 0x00074526;
+		break;
+	}
+	priv->treg[0][0] = priv->treg[1][0] = value;
+	priv->treg[0][1] = priv->treg[1][1] = value2;
+}
+
+static int pata_macio_cable_detect(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	/* Get cable type from device-tree */
+	if (priv->kind == controller_kl_ata4 ||
+	    priv->kind == controller_un_ata6 ||
+	    priv->kind == controller_k2_ata6 ||
+	    priv->kind == controller_sh_ata6) {
+		const char *cable = of_get_property(priv->node, "cable-type",
+						    NULL);
+		struct device_node *root = of_find_node_by_path("/");
+		const char *model = of_get_property(root, "model", NULL);
+
+		if (cable && !strncmp(cable, "80-", 3)) {
+			/* Some drives fail to detect the 80c cable in
+			 * PowerBooks. These machines use a proprietary
+			 * short IDE cable anyway.
+			 */
+			if (model && !strncmp(model, "PowerBook", 9))
+				return ATA_CBL_PATA40_SHORT;
+			else
+				return ATA_CBL_PATA80;
+		}
+	}
+
+	/* G5s seem to have an incorrect cable type in the device-tree.
+	 * Let's assume they always have an 80-conductor cable; this seems
+	 * to always be the case unless the user mucked around.
+	 */
+	if (of_device_is_compatible(priv->node, "K2-UATA") ||
+	    of_device_is_compatible(priv->node, "shasta-ata"))
+		return ATA_CBL_PATA80;
+
+	/* Anything else is 40 conductors */
+	return ATA_CBL_PATA40;
+}
+
+static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+{
+	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct scatterlist *sg;
+	struct dbdma_cmd *table;
+	unsigned int si, pi;
+
+	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
+		   __func__, qc, qc->flags, write, qc->dev->devno);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	table = (struct dbdma_cmd *) priv->dma_table_cpu;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			/* table overflow should never happen */
+			BUG_ON(pi++ >= MAX_DCMDS);
+
+			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+			st_le16(&table->command, write ? OUTPUT_MORE : INPUT_MORE);
+			st_le16(&table->req_count, len);
+			st_le32(&table->phy_addr, addr);
+			table->cmd_dep = 0;
+			table->xfer_status = 0;
+			table->res_count = 0;
+			addr += len;
+			sg_len -= len;
+			++table;
+		}
+	}
+
+	/* Should never happen according to Tejun */
+	BUG_ON(!pi);
+
+	/* Convert the last command to an input/output */
+	table--;
+	st_le16(&table->command, write ? OUTPUT_LAST : INPUT_LAST);
+	table++;
+
+	/* Add the stop command to the end of the list */
+	memset(table, 0, sizeof(struct dbdma_cmd));
+	st_le16(&table->command, DBDMA_STOP);
+
+	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+}
+
+
+static void pata_macio_freeze(struct ata_port *ap)
+{
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+	if (dma_regs) {
+		unsigned int timeout = 1000000;
+
+		/* Make sure DMA controller is stopped */
+		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
+		while (--timeout && (readl(&dma_regs->status) & RUN))
+			udelay(1);
+	}
+
+	ata_sff_freeze(ap);
+}
+
+
+static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	int dev = qc->dev->devno;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	/* Make sure DMA command updates are visible */
+	writel(priv->dma_table_dma, &dma_regs->cmdptr);
+
+	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup on
+	 * UDMA reads
+	 */
+	if (priv->kind == controller_kl_ata4 &&
+	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
+		void __iomem *rbase = ap->ioaddr.cmd_addr;
+		u32 reg = priv->treg[dev][0];
+
+		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+			reg += 0x00800000;
+		writel(reg, rbase + IDE_TIMING_CONFIG);
+	}
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	writel((RUN << 16) | RUN, &dma_regs->control);
+	/* Make sure it gets to the controller right now */
+	(void)readl(&dma_regs->control);
+}
+
+static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	unsigned int timeout = 1000000;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	/* Stop the DMA engine and wait for it to fully halt */
+	writel(((RUN|WAKE|DEAD) << 16), &dma_regs->control);
+	while (--timeout && (readl(&dma_regs->status) & RUN))
+		udelay(1);
+}
+
+static u8 pata_macio_bmdma_status(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	u32 dstat, rstat = ATA_DMA_INTR;
+	unsigned long timeout = 0;
+
+	dstat = readl(&dma_regs->status);
+
+	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
+
+	/* We have two things to deal with here:
+	 *
+	 * - The dbdma won't stop if the command was started
+	 * but completed with an error without transferring all
+	 * data. This happens when bad blocks are met during
+	 * a multi-block transfer.
+	 *
+	 * - The dbdma fifo hasn't yet finished flushing to
+	 * system memory when the disk interrupt occurs.
+	 *
+	 */
+
+	/* First check for errors */
+	if ((dstat & (RUN|DEAD)) != RUN)
+		rstat |= ATA_DMA_ERR;
+
+	/* If ACTIVE is cleared, the STOP command has been hit and
+	 * the transfer is complete. If not, we have to flush the
+	 * channel.
+	 */
+	if ((dstat & ACTIVE) == 0)
+		return rstat;
+
+	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
+
+	/* If dbdma didn't execute the STOP command yet, the
+	 * active bit is still set.
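+	 * (This is the early-termination case described above, e.g. a
+	 * multi-block transfer that hit a bad block.)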
We consider that we aren't
+	 * sharing interrupts (which is hopefully the case with
+	 * those controllers) and so we just try to flush the
+	 * channel for pending data in the fifo
+	 */
+	udelay(1);
+	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
+	for (;;) {
+		udelay(1);
+		dstat = readl(&dma_regs->status);
+		if ((dstat & FLUSH) == 0)
+			break;
+		if (++timeout > 1000) {
+			dev_warn(priv->dev, "timeout flushing DMA\n");
+			rstat |= ATA_DMA_ERR;
+			break;
+		}
+	}
+	return rstat;
+}
+
+/* port_start is when we allocate the DMA command list */
+static int pata_macio_port_start(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	if (ap->ioaddr.bmdma_addr == NULL)
+		return 0;
+
+	/* Allocate space for the DBDMA commands.
+	 *
+	 * The +2 is +1 for the stop command and +1 to allow for
+	 * aligning the start address to a multiple of 16 bytes.
+	 */
+	priv->dma_table_cpu =
+		dmam_alloc_coherent(priv->dev,
+				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
+				    &priv->dma_table_dma, GFP_KERNEL);
+	if (priv->dma_table_cpu == NULL) {
+		dev_err(priv->dev, "Unable to allocate DMA command list\n");
+		ap->ioaddr.bmdma_addr = NULL;
+		ap->mwdma_mask = 0;
+		ap->udma_mask = 0;
+	}
+	return 0;
+}
+
+static void pata_macio_irq_clear(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	/* Nothing to do here */
+
+	dev_dbgdma(priv->dev, "%s\n", __func__);
+}
+
+static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
+{
+	dev_dbg(priv->dev, "Enabling & resetting...\n");
+
+	if (priv->mediabay)
+		return;
+
+	if (priv->kind == controller_ohare && !resume) {
+		/* The code below is having trouble on some ohare machines
+		 * (timing-related?). Until I can put my hands on one of
+		 * these units, I keep the old way
+		 */
+		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
+	} else {
+		int rc;
+
+		/* Reset and enable controller */
+		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+					 priv->node, priv->aapl_bus_id, 1);
+		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
+				    priv->node, priv->aapl_bus_id, 1);
+		msleep(10);
+		/* Only bother waiting if there's a reset control */
+		if (rc == 0) {
+			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+					    priv->node, priv->aapl_bus_id, 0);
+			msleep(IDE_WAKEUP_DELAY_MS);
+		}
+	}
+
+	/* If resuming a PCI device, restore the config space here */
+	if (priv->pdev && resume) {
+		int rc;
+
+		pci_restore_state(priv->pdev);
+		rc = pcim_enable_device(priv->pdev);
+		if (rc)
+			dev_err(&priv->pdev->dev,
+				"Failed to enable device after resume (%d)\n",
+				rc);
+		else
+			pci_set_master(priv->pdev);
+	}
+
+	/* On Kauai, initialize the FCR.
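+	 * The write below enables the cell and deasserts its reset line
+	 * (together with the UATA "magic" bits).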
We don't perform a full reset; it doesn't
+	 * really seem necessary and skipping it speeds up the boot process
+	 */
+	if (priv->kauai_fcr)
+		writel(KAUAI_FCR_UATA_MAGIC |
+		       KAUAI_FCR_UATA_RESET_N |
+		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
+}
+
+/* Hook the standard slave config to fix up some HW-related alignment
+ * restrictions
+ */
+static int pata_macio_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct pata_macio_priv *priv = ap->private_data;
+	struct ata_device *dev;
+	u16 cmd;
+	int rc;
+
+	/* First call original */
+	rc = ata_scsi_slave_config(sdev);
+	if (rc)
+		return rc;
+
+	/* This is lifted from sata_nv */
+	dev = &ap->link.device[sdev->id];
+
+	/* OHare has issues with non-cache-aligned DMA on some chipsets */
+	if (priv->kind == controller_ohare) {
+		blk_queue_update_dma_alignment(sdev->request_queue, 31);
+		blk_queue_update_dma_pad(sdev->request_queue, 31);
+
+		/* Tell the world about it */
+		ata_dev_info(dev, "OHare alignment limits applied\n");
+		return 0;
+	}
+
+	/* We only have issues with ATAPI */
+	if (dev->class != ATA_DEV_ATAPI)
+		return 0;
+
+	/* Shasta and K2 seem to have "issues" with reads ... */
+	if (priv->kind == controller_sh_ata6 ||
+	    priv->kind == controller_k2_ata6) {
+		/* All right, these are bad; apply restrictions */
+		blk_queue_update_dma_alignment(sdev->request_queue, 15);
+		blk_queue_update_dma_pad(sdev->request_queue, 15);
+
+		/* We enable MWI and hack cache line size directly here; this
+		 * is specific to this chipset and not normal values. We happen
+		 * to somewhat know what we are doing here (which is basically
+		 * to do the same as Apple does and pray they did not get it
+		 * wrong :-)
+		 */
+		BUG_ON(!priv->pdev);
+		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
+		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
+		pci_write_config_word(priv->pdev, PCI_COMMAND,
+				      cmd | PCI_COMMAND_INVALIDATE);
+
+		/* Tell the world about it */
+		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
+{
+	int rc;
+
+	/* First, core libata suspend to do most of the work */
+	rc = ata_host_suspend(priv->host, mesg);
+	if (rc)
+		return rc;
+
+	/* Restore to default timings */
+	pata_macio_default_timings(priv);
+
+	/* Mask interrupt. Not strictly necessary, but the old driver did
+	 * it and I'd rather not change that here */
+	disable_irq(priv->irq);
+
+	/* The media bay will handle itself just fine */
+	if (priv->mediabay)
+		return 0;
+
+	/* Kauai has bus control FCRs directly here */
+	if (priv->kauai_fcr) {
+		u32 fcr = readl(priv->kauai_fcr);
+		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
+		writel(fcr, priv->kauai_fcr);
+	}
+
+	/* For PCI, save state and disable DMA. No need to call
+	 * pci_set_power_state(), the HW doesn't do D states that
+	 * way, the platform code will take care of suspending the
+	 * ASIC properly
+	 */
+	if (priv->pdev) {
+		pci_save_state(priv->pdev);
+		pci_disable_device(priv->pdev);
+	}
+
+	/* Disable the bus on older machines and the cell on kauai */
+	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
+			    priv->aapl_bus_id, 0);
+
+	return 0;
+}
+
+static int pata_macio_do_resume(struct pata_macio_priv *priv)
+{
+	/* Reset and re-enable the HW */
+	pata_macio_reset_hw(priv, 1);
+
+	/* Sanitize drive timings */
+	pata_macio_apply_timings(priv->host->ports[0], 0);
+
+	/* We want our IRQ back!
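+	 * (it was masked with disable_irq() in pata_macio_do_suspend())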
*/ + enable_irq(priv->irq); + + /* Let the libata core take it from there */ + ata_host_resume(priv->host); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static struct scsi_host_template pata_macio_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = MAX_DCMDS, + /* We may not need that strict one */ + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = pata_macio_slave_config, +}; + +static struct ata_port_operations pata_macio_ops = { + .inherits = &ata_bmdma_port_ops, + + .freeze = pata_macio_freeze, + .set_piomode = pata_macio_set_timings, + .set_dmamode = pata_macio_set_timings, + .cable_detect = pata_macio_cable_detect, + .sff_dev_select = pata_macio_dev_select, + .qc_prep = pata_macio_qc_prep, + .bmdma_setup = pata_macio_bmdma_setup, + .bmdma_start = pata_macio_bmdma_start, + .bmdma_stop = pata_macio_bmdma_stop, + .bmdma_status = pata_macio_bmdma_status, + .port_start = pata_macio_port_start, + .sff_irq_clear = pata_macio_irq_clear, +}; + +static void pata_macio_invariants(struct pata_macio_priv *priv) +{ + const int *bidp; + + /* Identify the type of controller */ + if (of_device_is_compatible(priv->node, "shasta-ata")) { + priv->kind = controller_sh_ata6; + priv->timings = pata_macio_shasta_timings; + } else if (of_device_is_compatible(priv->node, "kauai-ata")) { + priv->kind = controller_un_ata6; + priv->timings = pata_macio_kauai_timings; + } else if (of_device_is_compatible(priv->node, "K2-UATA")) { + priv->kind = controller_k2_ata6; + priv->timings = pata_macio_kauai_timings; + } else if (of_device_is_compatible(priv->node, "keylargo-ata")) { + if (strcmp(priv->node->name, "ata-4") == 0) { + priv->kind = controller_kl_ata4; + priv->timings = pata_macio_kl66_timings; + } else { + priv->kind = controller_kl_ata3; + priv->timings = pata_macio_kl33_timings; + } + } else if (of_device_is_compatible(priv->node, "heathrow-ata")) { + priv->kind = controller_heathrow; + priv->timings = pata_macio_heathrow_timings; + } else { + priv->kind = controller_ohare; + priv->timings = pata_macio_ohare_timings; + } + + /* XXX FIXME --- setup priv->mediabay here */ + + /* Get Apple bus ID (for clock and ASIC control) */ + bidp = of_get_property(priv->node, "AAPL,bus-id", NULL); + priv->aapl_bus_id = bidp ? 
*bidp : 0; + + /* Fixup missing Apple bus ID in case of media-bay */ + if (priv->mediabay && bidp == 0) + priv->aapl_bus_id = 1; +} + +static void pata_macio_setup_ios(struct ata_ioports *ioaddr, + void __iomem * base, void __iomem * dma) +{ + /* cmd_addr is the base of regs for that port */ + ioaddr->cmd_addr = base; + + /* taskfile registers */ + ioaddr->data_addr = base + (ATA_REG_DATA << 4); + ioaddr->error_addr = base + (ATA_REG_ERR << 4); + ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4); + ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4); + ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4); + ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4); + ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4); + ioaddr->device_addr = base + (ATA_REG_DEVICE << 4); + ioaddr->status_addr = base + (ATA_REG_STATUS << 4); + ioaddr->command_addr = base + (ATA_REG_CMD << 4); + ioaddr->altstatus_addr = base + 0x160; + ioaddr->ctl_addr = base + 0x160; + ioaddr->bmdma_addr = dma; +} + +static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv, + struct ata_port_info *pinfo) +{ + int i = 0; + + pinfo->pio_mask = 0; + pinfo->mwdma_mask = 0; + pinfo->udma_mask = 0; + + while (priv->timings[i].mode > 0) { + unsigned int mask = 1U << (priv->timings[i].mode & 0x0f); + switch(priv->timings[i].mode & 0xf0) { + case 0x00: /* PIO */ + pinfo->pio_mask |= (mask >> 8); + break; + case 0x20: /* MWDMA */ + pinfo->mwdma_mask |= mask; + break; + case 0x40: /* UDMA */ + pinfo->udma_mask |= mask; + break; + } + i++; + } + dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n", + pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask); +} + +static int pata_macio_common_init(struct pata_macio_priv *priv, + resource_size_t tfregs, + resource_size_t dmaregs, + resource_size_t fcregs, + unsigned long irq) +{ + struct ata_port_info pinfo; + const struct ata_port_info *ppi[] = { &pinfo, NULL }; + void __iomem *dma_regs = NULL; + + /* Fill up privates with various invariants collected from the + * device-tree + */ + pata_macio_invariants(priv); + + /* Make sure we have sane initial timings in the cache */ + pata_macio_default_timings(priv); + + /* Not sure what the real max is but we know it's less than 64K, let's + * use 64K minus 256 + */ + dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG); + + /* Allocate libata host for 1 port */ + memset(&pinfo, 0, sizeof(struct ata_port_info)); + pmac_macio_calc_timing_masks(priv, &pinfo); + pinfo.flags = ATA_FLAG_SLAVE_POSS; + pinfo.port_ops = &pata_macio_ops; + pinfo.private_data = priv; + + priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1); + if (priv->host == NULL) { + dev_err(priv->dev, "Failed to allocate ATA port structure\n"); + return -ENOMEM; + } + + /* Setup the private data in host too */ + priv->host->private_data = priv; + + /* Map base registers */ + priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100); + if (priv->tfregs == NULL) { + dev_err(priv->dev, "Failed to map ATA ports\n"); + return -ENOMEM; + } + priv->host->iomap = &priv->tfregs; + + /* Map DMA regs */ + if (dmaregs != 0) { + dma_regs = devm_ioremap(priv->dev, dmaregs, + sizeof(struct dbdma_regs)); + if (dma_regs == NULL) + dev_warn(priv->dev, "Failed to map ATA DMA registers\n"); + } + + /* If chip has local feature control, map those regs too */ + if (fcregs != 0) { + priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4); + if (priv->kauai_fcr == NULL) { + dev_err(priv->dev, "Failed to map ATA FCR register\n"); + return -ENOMEM; + } + } + + /* Setup port data structure */ + 
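+	/*
+	 * The macio cell spaces the taskfile registers 16 bytes apart (hence
+	 * the ATA_REG_* << 4 offsets in pata_macio_setup_ios() above), with
+	 * the control/altstatus register at offset 0x160.
+	 */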
pata_macio_setup_ios(&priv->host->ports[0]->ioaddr, + priv->tfregs, dma_regs); + priv->host->ports[0]->private_data = priv; + + /* hard-reset the controller */ + pata_macio_reset_hw(priv, 0); + pata_macio_apply_timings(priv->host->ports[0], 0); + + /* Enable bus master if necessary */ + if (priv->pdev && dma_regs) + pci_set_master(priv->pdev); + + dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n", + macio_ata_names[priv->kind], priv->aapl_bus_id); + + /* Start it up */ + priv->irq = irq; + return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0, + &pata_macio_sht); +} + +static int pata_macio_attach(struct macio_dev *mdev, + const struct of_device_id *match) +{ + struct pata_macio_priv *priv; + resource_size_t tfregs, dmaregs = 0; + unsigned long irq; + int rc; + + /* Check for broken device-trees */ + if (macio_resource_count(mdev) == 0) { + dev_err(&mdev->ofdev.dev, + "No addresses for controller\n"); + return -ENXIO; + } + + /* Enable managed resources */ + macio_enable_devres(mdev); + + /* Allocate and init private data structure */ + priv = devm_kzalloc(&mdev->ofdev.dev, + sizeof(struct pata_macio_priv), GFP_KERNEL); + if (priv == NULL) { + dev_err(&mdev->ofdev.dev, + "Failed to allocate private memory\n"); + return -ENOMEM; + } + priv->node = of_node_get(mdev->ofdev.dev.of_node); + priv->mdev = mdev; + priv->dev = &mdev->ofdev.dev; + + /* Request memory resource for taskfile registers */ + if (macio_request_resource(mdev, 0, "pata-macio")) { + dev_err(&mdev->ofdev.dev, + "Cannot obtain taskfile resource\n"); + return -EBUSY; + } + tfregs = macio_resource_start(mdev, 0); + + /* Request resources for DMA registers if any */ + if (macio_resource_count(mdev) >= 2) { + if (macio_request_resource(mdev, 1, "pata-macio-dma")) + dev_err(&mdev->ofdev.dev, + "Cannot obtain DMA resource\n"); + else + dmaregs = macio_resource_start(mdev, 1); + } + + /* + * Fixup missing IRQ for some old implementations with broken + * device-trees. + * + * This is a bit bogus, it should be fixed in the device-tree itself, + * via the existing macio fixups, based on the type of interrupt + * controller in the machine. However, I have no test HW for this case, + * and this trick works well enough on those old machines... 
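+	 * The fallback below hardwires the mapping to interrupt 13, the
+	 * line those broken device-trees fail to describe.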
+ */ + if (macio_irq_count(mdev) == 0) { + dev_warn(&mdev->ofdev.dev, + "No interrupts for controller, using 13\n"); + irq = irq_create_mapping(NULL, 13); + } else + irq = macio_irq(mdev, 0); + + /* Prevvent media bay callbacks until fully registered */ + lock_media_bay(priv->mdev->media_bay); + + /* Get register addresses and call common initialization */ + rc = pata_macio_common_init(priv, + tfregs, /* Taskfile regs */ + dmaregs, /* DBDMA regs */ + 0, /* Feature control */ + irq); + unlock_media_bay(priv->mdev->media_bay); + + return rc; +} + +static int pata_macio_detach(struct macio_dev *mdev) +{ + struct ata_host *host = macio_get_drvdata(mdev); + struct pata_macio_priv *priv = host->private_data; + + lock_media_bay(priv->mdev->media_bay); + + /* Make sure the mediabay callback doesn't try to access + * dead stuff + */ + priv->host->private_data = NULL; + + ata_host_detach(host); + + unlock_media_bay(priv->mdev->media_bay); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) +{ + struct ata_host *host = macio_get_drvdata(mdev); + + return pata_macio_do_suspend(host->private_data, mesg); +} + +static int pata_macio_resume(struct macio_dev *mdev) +{ + struct ata_host *host = macio_get_drvdata(mdev); + + return pata_macio_do_resume(host->private_data); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PMAC_MEDIABAY +static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state) +{ + struct ata_host *host = macio_get_drvdata(mdev); + struct ata_port *ap; + struct ata_eh_info *ehi; + struct ata_device *dev; + unsigned long flags; + + if (!host || !host->private_data) + return; + ap = host->ports[0]; + spin_lock_irqsave(ap->lock, flags); + ehi = &ap->link.eh_info; + if (mb_state == MB_CD) { + ata_ehi_push_desc(ehi, "mediabay plug"); + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); + } else { + ata_ehi_push_desc(ehi, "mediabay unplug"); + ata_for_each_dev(dev, &ap->link, ALL) + dev->flags |= ATA_DFLAG_DETACH; + ata_port_abort(ap); + } + spin_unlock_irqrestore(ap->lock, flags); + +} +#endif /* CONFIG_PMAC_MEDIABAY */ + + +static int pata_macio_pci_attach(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pata_macio_priv *priv; + struct device_node *np; + resource_size_t rbase; + + /* We cannot use a MacIO controller without its OF device node */ + np = pci_device_to_OF_node(pdev); + if (np == NULL) { + dev_err(&pdev->dev, + "Cannot find OF device node for controller\n"); + return -ENODEV; + } + + /* Check that it can be enabled */ + if (pcim_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot enable controller PCI device\n"); + return -ENXIO; + } + + /* Allocate and init private data structure */ + priv = devm_kzalloc(&pdev->dev, + sizeof(struct pata_macio_priv), GFP_KERNEL); + if (priv == NULL) { + dev_err(&pdev->dev, + "Failed to allocate private memory\n"); + return -ENOMEM; + } + priv->node = of_node_get(np); + priv->pdev = pdev; + priv->dev = &pdev->dev; + + /* Get MMIO regions */ + if (pci_request_regions(pdev, "pata-macio")) { + dev_err(&pdev->dev, + "Cannot obtain PCI resources\n"); + return -EBUSY; + } + + /* Get register addresses and call common initialization */ + rbase = pci_resource_start(pdev, 0); + if (pata_macio_common_init(priv, + rbase + 0x2000, /* Taskfile regs */ + rbase + 0x1000, /* DBDMA regs */ + rbase, /* Feature control */ + pdev->irq)) + return -ENXIO; + + return 0; +} + +static void pata_macio_pci_detach(struct pci_dev *pdev) +{ + struct ata_host *host = 
pci_get_drvdata(pdev); + + ata_host_detach(host); +} + +#ifdef CONFIG_PM_SLEEP +static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) +{ + struct ata_host *host = pci_get_drvdata(pdev); + + return pata_macio_do_suspend(host->private_data, mesg); +} + +static int pata_macio_pci_resume(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + + return pata_macio_do_resume(host->private_data); +} +#endif /* CONFIG_PM_SLEEP */ + +static struct of_device_id pata_macio_match[] = +{ + { + .name = "IDE", + }, + { + .name = "ATA", + }, + { + .type = "ide", + }, + { + .type = "ata", + }, + {}, +}; + +static struct macio_driver pata_macio_driver = +{ + .driver = { + .name = "pata-macio", + .owner = THIS_MODULE, + .of_match_table = pata_macio_match, + }, + .probe = pata_macio_attach, + .remove = pata_macio_detach, +#ifdef CONFIG_PM_SLEEP + .suspend = pata_macio_suspend, + .resume = pata_macio_resume, +#endif +#ifdef CONFIG_PMAC_MEDIABAY + .mediabay_event = pata_macio_mb_event, +#endif +}; + +static const struct pci_device_id pata_macio_pci_match[] = { + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 }, + {}, +}; + +static struct pci_driver pata_macio_pci_driver = { + .name = "pata-pci-macio", + .id_table = pata_macio_pci_match, + .probe = pata_macio_pci_attach, + .remove = pata_macio_pci_detach, +#ifdef CONFIG_PM_SLEEP + .suspend = pata_macio_pci_suspend, + .resume = pata_macio_pci_resume, +#endif + .driver = { + .owner = THIS_MODULE, + }, +}; +MODULE_DEVICE_TABLE(pci, pata_macio_pci_match); + + +static int __init pata_macio_init(void) +{ + int rc; + + if (!machine_is(powermac)) + return -ENODEV; + + rc = pci_register_driver(&pata_macio_pci_driver); + if (rc) + return rc; + rc = macio_register_driver(&pata_macio_driver); + if (rc) { + pci_unregister_driver(&pata_macio_pci_driver); + return rc; + } + return 0; +} + +static void __exit pata_macio_exit(void) +{ + macio_unregister_driver(&pata_macio_driver); + pci_unregister_driver(&pata_macio_pci_driver); +} + +module_init(pata_macio_init); +module_exit(pata_macio_exit); + +MODULE_AUTHOR("Benjamin Herrenschmidt"); +MODULE_DESCRIPTION("Apple MacIO PATA driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c new file mode 100644 index 00000000000..ae9feb1ba8f --- /dev/null +++ b/drivers/ata/pata_marvell.c @@ -0,0 +1,186 @@ +/* + * Marvell PATA driver. + * + * For the moment we drive the PATA port in legacy mode. That + * isn't making full use of the device functionality but it is + * easy to get working. + * + * (c) 2006 Red Hat + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/ata.h> + +#define DRV_NAME "pata_marvell" +#define DRV_VERSION "0.1.6" + +/** + * marvell_pata_active - check if PATA is active + * @pdev: PCI device + * + * Returns 1 if the PATA port may be active. 
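+ * The check reads the device-present word at BAR5 + 0x0C and tests
+ * bit 4.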
We know how to check this
+ * for the 6145 but not the other devices.
+ */
+
+static int marvell_pata_active(struct pci_dev *pdev)
+{
+	int i;
+	u32 devices;
+	void __iomem *barp;
+
+	/* We don't yet know how to do this for other devices */
+	if (pdev->device != 0x6145)
+		return 1;
+
+	barp = pci_iomap(pdev, 5, 0x10);
+	if (barp == NULL)
+		return -ENOMEM;
+
+	printk(KERN_DEBUG "BAR5:");
+	for (i = 0; i <= 0x0F; i++)
+		printk("%02X:%02X ", i, ioread8(barp + i));
+	printk("\n");
+
+	devices = ioread32(barp + 0x0C);
+	pci_iounmap(pdev, barp);
+
+	if (devices & 0x10)
+		return 1;
+	return 0;
+}
+
+/**
+ * marvell_pre_reset - probe begin
+ * @link: link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->device == 0x6145 && ap->port_no == 0 &&
+	    !marvell_pata_active(pdev))	/* PATA enabled? */
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+static int marvell_cable_detect(struct ata_port *ap)
+{
+	/* Cable type */
+	switch (ap->port_no) {
+	case 0:
+		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+			return ATA_CBL_PATA40;
+		return ATA_CBL_PATA80;
+	case 1: /* Legacy SATA port */
+		return ATA_CBL_SATA;
+	}
+
+	BUG();
+	return 0;	/* Our BUG macro needs the right markup */
+}
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template marvell_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations marvell_ops = {
+	.inherits = &ata_bmdma_port_ops,
+	.cable_detect = marvell_cable_detect,
+	.prereset = marvell_pre_reset,
+};
+
+
+/**
+ * marvell_init_one - Register Marvell ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @id: Entry in marvell_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
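+ *
+ * If the 6145's PATA port is inactive and an AHCI driver is configured,
+ * the probe is refused with -ENODEV so the AHCI driver can claim the
+ * device instead.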
+ */ + +static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + + .port_ops = &marvell_ops, + }; + static const struct ata_port_info info_sata = { + /* Slave possible as its magically mapped not real */ + .flags = ATA_FLAG_SLAVE_POSS, + + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + + .port_ops = &marvell_ops, + }; + const struct ata_port_info *ppi[] = { &info, &info_sata }; + + if (pdev->device == 0x6101) + ppi[1] = &ata_dummy_port_info; + +#if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE) + if (!marvell_pata_active(pdev)) { + printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); + return -ENODEV; + } +#endif + return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); +} + +static const struct pci_device_id marvell_pci_tbl[] = { + { PCI_DEVICE(0x11AB, 0x6101), }, + { PCI_DEVICE(0x11AB, 0x6121), }, + { PCI_DEVICE(0x11AB, 0x6123), }, + { PCI_DEVICE(0x11AB, 0x6145), }, + { PCI_DEVICE(0x1B4B, 0x91A0), }, + { PCI_DEVICE(0x1B4B, 0x91A4), }, + + { } /* terminate list */ +}; + +static struct pci_driver marvell_pci_driver = { + .name = DRV_NAME, + .id_table = marvell_pci_tbl, + .probe = marvell_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(marvell_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, marvell_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c new file mode 100644 index 00000000000..ccd1c83a05c --- /dev/null +++ b/drivers/ata/pata_mpc52xx.c @@ -0,0 +1,877 @@ +/* + * drivers/ata/pata_mpc52xx.c + * + * libata driver for the Freescale MPC52xx on-chip IDE interface + * + * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt + * + * UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby), + * Domen Puncer and Tim Yamin. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/gfp.h> +#include <linux/delay.h> +#include <linux/libata.h> +#include <linux/of_platform.h> +#include <linux/types.h> + +#include <asm/cacheflush.h> +#include <asm/prom.h> +#include <asm/mpc52xx.h> + +#include <linux/fsl/bestcomm/bestcomm.h> +#include <linux/fsl/bestcomm/bestcomm_priv.h> +#include <linux/fsl/bestcomm/ata.h> + +#define DRV_NAME "mpc52xx_ata" + +/* Private structures used by the driver */ +struct mpc52xx_ata_timings { + u32 pio1; + u32 pio2; + u32 mdma1; + u32 mdma2; + u32 udma1; + u32 udma2; + u32 udma3; + u32 udma4; + u32 udma5; + int using_udma; +}; + +struct mpc52xx_ata_priv { + unsigned int ipb_period; + struct mpc52xx_ata __iomem *ata_regs; + phys_addr_t ata_regs_pa; + int ata_irq; + struct mpc52xx_ata_timings timings[2]; + int csel; + + /* DMA */ + struct bcom_task *dmatsk; + const struct udmaspec *udmaspec; + const struct mdmaspec *mdmaspec; + int mpc52xx_ata_dma_last_write; + int waiting_for_dma; +}; + + +/* ATAPI-4 PIO specs (in ns) */ +static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120}; +static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25}; +static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70}; +static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70}; +static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25}; +static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10}; +static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35}; + +#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) + +/* ======================================================================== */ + +/* ATAPI-4 MDMA specs (in clocks) */ +struct mdmaspec { + u8 t0M; + u8 td; + u8 th; + u8 tj; + u8 tkw; + u8 tm; + u8 tn; +}; + +static const struct mdmaspec mdmaspec66[3] = { + { .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 }, + { .t0M = 10, .td = 6, .th = 1, .tj = 1, .tkw = 4, .tm = 2, .tn = 1 }, + { .t0M = 8, .td = 5, .th = 1, .tj = 1, .tkw = 2, .tm = 2, .tn = 1 }, +}; + +static const struct mdmaspec mdmaspec132[3] = { + { .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 }, + { .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7, .tm = 4, .tn = 1 }, + { .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4, .tm = 4, .tn = 1 }, +}; + +/* ATAPI-4 UDMA specs (in clocks) */ +struct udmaspec { + u8 tcyc; + u8 t2cyc; + u8 tds; + u8 tdh; + u8 tdvs; + u8 tdvh; + u8 tfs; + u8 tli; + u8 tmli; + u8 taz; + u8 tzah; + u8 tenv; + u8 tsr; + u8 trfs; + u8 trp; + u8 tack; + u8 tss; +}; + +static const struct udmaspec udmaspec66[6] = { + { .tcyc = 8, .t2cyc = 16, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1, + .tfs = 16, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 3, .trfs = 5, .trp = 11, .tack = 2, .tss = 4, + }, + { .tcyc = 5, .t2cyc = 11, .tds = 1, .tdh = 1, .tdvs = 4, .tdvh = 1, + .tfs = 14, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 5, .trp = 9, .tack = 2, .tss = 4, + }, + { .tcyc = 4, .t2cyc = 8, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1, + .tfs = 12, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, + }, + { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 2, .tdvh = 1, + .tfs = 9, .tli = 7, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, + }, + { .tcyc = 2, .t2cyc = 4, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, + .tfs = 8, .tli = 8, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4, + }, + { .tcyc = 2, 
.t2cyc = 2, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, + .tfs = 6, .tli = 5, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 4, .trp = 6, .tack = 2, .tss = 4, + }, +}; + +static const struct udmaspec udmaspec132[6] = { + { .tcyc = 15, .t2cyc = 31, .tds = 2, .tdh = 1, .tdvs = 10, .tdvh = 1, + .tfs = 30, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, + .tsr = 7, .trfs = 10, .trp = 22, .tack = 3, .tss = 7, + }, + { .tcyc = 10, .t2cyc = 21, .tds = 2, .tdh = 1, .tdvs = 7, .tdvh = 1, + .tfs = 27, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, + .tsr = 4, .trfs = 10, .trp = 17, .tack = 3, .tss = 7, + }, + { .tcyc = 6, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1, + .tfs = 23, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, + .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7, + }, + { .tcyc = 7, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1, + .tfs = 15, .tli = 13, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, + .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7, + }, + { .tcyc = 2, .t2cyc = 5, .tds = 0, .tdh = 0, .tdvs = 1, .tdvh = 1, + .tfs = 16, .tli = 14, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2, + .tsr = 2, .trfs = 7, .trp = 13, .tack = 2, .tss = 6, + }, + { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1, + .tfs = 12, .tli = 10, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3, + .tsr = 3, .trfs = 7, .trp = 12, .tack = 3, .tss = 7, + }, +}; + +/* ======================================================================== */ + +/* Bit definitions inside the registers */ +#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */ +#define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */ +#define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */ +#define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */ + +#define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */ +#define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */ +#define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */ +#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */ + +#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */ +#define MPC52xx_ATA_FIFOSTAT_ERROR 0x40 /* FIFO Error */ + +#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */ +#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */ +#define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */ +#define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */ +#define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */ +#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */ +#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */ + +#define MAX_DMA_BUFFERS 128 +#define MAX_DMA_BUFFER_SIZE 0x20000u + +/* Structure of the hardware registers */ +struct mpc52xx_ata { + + /* Host interface registers */ + u32 config; /* ATA + 0x00 Host configuration */ + u32 host_status; /* ATA + 0x04 Host controller status */ + u32 pio1; /* ATA + 0x08 PIO Timing 1 */ + u32 pio2; /* ATA + 0x0c PIO Timing 2 */ + u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */ + u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */ + u32 udma1; /* ATA + 0x18 UDMA Timing 1 */ + u32 udma2; /* ATA + 0x1c UDMA Timing 2 */ + u32 udma3; /* ATA + 0x20 UDMA Timing 3 */ + u32 udma4; /* ATA + 0x24 UDMA Timing 4 */ + u32 udma5; /* ATA + 0x28 UDMA Timing 5 */ + u32 share_cnt; /* ATA + 0x2c ATA share counter */ + u32 reserved0[3]; + + /* FIFO registers */ + u32 fifo_data; /* ATA + 0x3c */ + u8 fifo_status_frame; /* ATA + 0x40 */ + u8 
fifo_status; /* ATA + 0x41 */ + u16 reserved7[1]; + u8 fifo_control; /* ATA + 0x44 */ + u8 reserved8[5]; + u16 fifo_alarm; /* ATA + 0x4a */ + u16 reserved9; + u16 fifo_rdp; /* ATA + 0x4e */ + u16 reserved10; + u16 fifo_wrp; /* ATA + 0x52 */ + u16 reserved11; + u16 fifo_lfrdp; /* ATA + 0x56 */ + u16 reserved12; + u16 fifo_lfwrp; /* ATA + 0x5a */ + + /* Drive TaskFile registers */ + u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */ + u8 reserved13[3]; + u16 tf_data; /* ATA + 0x60 TASKFILE Data */ + u16 reserved14; + u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */ + u8 reserved15[3]; + u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */ + u8 reserved16[3]; + u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */ + u8 reserved17[3]; + u8 tf_cyl_low; /* ATA + 0x70 TASKFILE Cylinder Low */ + u8 reserved18[3]; + u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */ + u8 reserved19[3]; + u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */ + u8 reserved20[3]; + u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */ + u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */ + u8 reserved21[2]; +}; + + +/* ======================================================================== */ +/* Aux fns */ +/* ======================================================================== */ + + +/* MPC52xx low level hw control */ +static int +mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio) +{ + struct mpc52xx_ata_timings *timing = &priv->timings[dev]; + unsigned int ipb_period = priv->ipb_period; + u32 t0, t1, t2_8, t2_16, t2i, t4, ta; + + if ((pio < 0) || (pio > 4)) + return -EINVAL; + + t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]); + t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]); + t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]); + t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]); + t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]); + t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]); + ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]); + + timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i); + timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8); + + return 0; +} + +static int +mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev, + int speed) +{ + struct mpc52xx_ata_timings *t = &priv->timings[dev]; + const struct mdmaspec *s = &priv->mdmaspec[speed]; + + if (speed < 0 || speed > 2) + return -EINVAL; + + t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm; + t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8); + t->using_udma = 0; + + return 0; +} + +static int +mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev, + int speed) +{ + struct mpc52xx_ata_timings *t = &priv->timings[dev]; + const struct udmaspec *s = &priv->udmaspec[speed]; + + if (speed < 0 || speed > 2) + return -EINVAL; + + t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh; + t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli; + t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr; + t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack; + t->udma5 = (u32)s->tzah << 24; + t->using_udma = 1; + + return 0; +} + +static void +mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device) +{ + struct mpc52xx_ata __iomem *regs = priv->ata_regs; + struct mpc52xx_ata_timings *timing = &priv->timings[device]; + + 
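+	/*
+	 * The controller has only one set of timing registers, shared by
+	 * both devices; the cached per-device values are rewritten whenever
+	 * the selected device changes (see mpc52xx_ata_dev_select()).
+	 */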
	out_be32(&regs->pio1, timing->pio1);
+	out_be32(&regs->pio2, timing->pio2);
+	out_be32(&regs->mdma1, timing->mdma1);
+	out_be32(&regs->mdma2, timing->mdma2);
+	out_be32(&regs->udma1, timing->udma1);
+	out_be32(&regs->udma2, timing->udma2);
+	out_be32(&regs->udma3, timing->udma3);
+	out_be32(&regs->udma4, timing->udma4);
+	out_be32(&regs->udma5, timing->udma5);
+	priv->csel = device;
+}
+
+static int
+mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
+{
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+	int tslot;
+
+	/* Clear share_cnt (all sample code does this ...) */
+	out_be32(&regs->share_cnt, 0);
+
+	/* Configure and reset host */
+	out_be32(&regs->config,
+		 MPC52xx_ATA_HOSTCONF_IE |
+		 MPC52xx_ATA_HOSTCONF_IORDY |
+		 MPC52xx_ATA_HOSTCONF_SMR |
+		 MPC52xx_ATA_HOSTCONF_FR);
+
+	udelay(10);
+
+	out_be32(&regs->config,
+		 MPC52xx_ATA_HOSTCONF_IE |
+		 MPC52xx_ATA_HOSTCONF_IORDY);
+
+	/* Set the time slot to 1us */
+	tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
+	out_be32(&regs->share_cnt, tslot << 16);
+
+	/* Init timings to PIO0 */
+	memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
+
+	mpc52xx_ata_compute_pio_timings(priv, 0, 0);
+	mpc52xx_ata_compute_pio_timings(priv, 1, 0);
+
+	mpc52xx_ata_apply_timings(priv, 0);
+
+	return 0;
+}
+
+
+/* ======================================================================== */
+/* libata driver                                                            */
+/* ======================================================================== */
+
+static void
+mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	int pio, rv;
+
+	pio = adev->pio_mode - XFER_PIO_0;
+
+	rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
+
+	if (rv) {
+		dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
+		return;
+	}
+
+	mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	int rv;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int dma = adev->dma_mode - XFER_UDMA_0;
+		rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma);
+	} else {
+		int dma = adev->dma_mode - XFER_MW_DMA_0;
+		rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma);
+	}
+
+	if (rv) {
+		dev_alert(ap->dev,
+			  "Trying to select invalid DMA mode %d\n",
+			  adev->dma_mode);
+		return;
+	}
+
+	mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	if (device != priv->csel)
+		mpc52xx_ata_apply_timings(priv, device);
+
+	ata_sff_dev_select(ap, device);
+}
+
+static int
+mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	struct bcom_ata_bd *bd;
+	unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
+	struct scatterlist *sg;
+	int count = 0;
+
+	if (read)
+		bcom_ata_rx_prepare(priv->dmatsk);
+	else
+		bcom_ata_tx_prepare(priv->dmatsk);
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t cur_addr = sg_dma_address(sg);
+		u32 cur_len = sg_dma_len(sg);
+
+		while (cur_len) {
+			unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
+			bd = (struct bcom_ata_bd *)
+				bcom_prepare_next_buffer(priv->dmatsk);
+
+			if (read) {
+				bd->status = tc;
+				bd->src_pa = (__force u32) priv->ata_regs_pa +
+					offsetof(struct mpc52xx_ata, fifo_data);
+				bd->dst_pa = (__force u32) cur_addr;
+			} else {
+				bd->status = tc;
+				bd->src_pa = (__force u32) cur_addr;
+				bd->dst_pa = (__force u32) priv->ata_regs_pa +
+					offsetof(struct mpc52xx_ata, fifo_data);
+			}
+
+			bcom_submit_next_buffer(priv->dmatsk, NULL);
+
+			cur_addr += tc;
+			cur_len -= tc;
+			count++;
+
+			if (count > MAX_DMA_BUFFERS) {
+				dev_alert(ap->dev, "dma table too small\n");
+				goto use_pio_instead;
+			}
+		}
+	}
+	return 1;
+
+ use_pio_instead:
+	bcom_ata_reset_bd(priv->dmatsk);
+	return 0;
+}
+
+static void
+mpc52xx_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+
+	unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dma_mode;
+
+	if (!mpc52xx_ata_build_dmatable(qc))
+		dev_alert(ap->dev, "%s: %i, return 1?\n",
+			  __func__, __LINE__);
+
+	/* Check FIFO is OK... */
+	if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
+		dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+			  __func__, in_8(&priv->ata_regs->fifo_status));
+
+	if (read) {
+		dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ |
+			   MPC52xx_ATA_DMAMODE_FE;
+
+		/* Setup FIFO if direction changed */
+		if (priv->mpc52xx_ata_dma_last_write != 0) {
+			priv->mpc52xx_ata_dma_last_write = 0;
+
+			/* Configure FIFO with a granularity of 7 */
+			out_8(&regs->fifo_control, 7);
+			out_be16(&regs->fifo_alarm, 128);
+
+			/* Set FIFO Reset bit (FR) */
+			out_8(&regs->dma_mode, MPC52xx_ATA_DMAMODE_FR);
+		}
+	} else {
+		dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE;
+
+		/* Setup FIFO if direction changed */
+		if (priv->mpc52xx_ata_dma_last_write != 1) {
+			priv->mpc52xx_ata_dma_last_write = 1;
+
+			/* Configure FIFO with a granularity of 4 */
+			out_8(&regs->fifo_control, 4);
+			out_be16(&regs->fifo_alarm, 128);
+		}
+	}
+
+	if (priv->timings[qc->dev->devno].using_udma)
+		dma_mode |= MPC52xx_ATA_DMAMODE_UDMA;
+
+	out_8(&regs->dma_mode, dma_mode);
+	priv->waiting_for_dma = ATA_DMA_ACTIVE;
+
+	ata_wait_idle(ap);
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void
+mpc52xx_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum);
+	bcom_enable(priv->dmatsk);
+}
+
+static void
+mpc52xx_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	bcom_disable(priv->dmatsk);
+	bcom_ata_reset_bd(priv->dmatsk);
+	priv->waiting_for_dma = 0;
+
+	/* Check FIFO is OK...
*/ + if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) { + dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n", + __func__, in_8(&priv->ata_regs->fifo_status)); + return priv->waiting_for_dma | ATA_DMA_ERR; + } + + return priv->waiting_for_dma; +} + +static irqreturn_t +mpc52xx_ata_task_irq(int irq, void *vpriv) +{ + struct mpc52xx_ata_priv *priv = vpriv; + while (bcom_buffer_done(priv->dmatsk)) + bcom_retrieve_buffer(priv->dmatsk, NULL, NULL); + + priv->waiting_for_dma |= ATA_DMA_INTR; + + return IRQ_HANDLED; +} + +static struct scsi_host_template mpc52xx_ata_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations mpc52xx_ata_port_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_dev_select = mpc52xx_ata_dev_select, + .set_piomode = mpc52xx_ata_set_piomode, + .set_dmamode = mpc52xx_ata_set_dmamode, + .bmdma_setup = mpc52xx_bmdma_setup, + .bmdma_start = mpc52xx_bmdma_start, + .bmdma_stop = mpc52xx_bmdma_stop, + .bmdma_status = mpc52xx_bmdma_status, + .qc_prep = ata_noop_qc_prep, +}; + +static int mpc52xx_ata_init_one(struct device *dev, + struct mpc52xx_ata_priv *priv, + unsigned long raw_ata_regs, + int mwdma_mask, int udma_mask) +{ + struct ata_host *host; + struct ata_port *ap; + struct ata_ioports *aio; + + host = ata_host_alloc(dev, 1); + if (!host) + return -ENOMEM; + + ap = host->ports[0]; + ap->flags |= ATA_FLAG_SLAVE_POSS; + ap->pio_mask = ATA_PIO4; + ap->mwdma_mask = mwdma_mask; + ap->udma_mask = udma_mask; + ap->ops = &mpc52xx_ata_port_ops; + host->private_data = priv; + + aio = &ap->ioaddr; + aio->cmd_addr = NULL; /* Don't have a classic reg block */ + aio->altstatus_addr = &priv->ata_regs->tf_control; + aio->ctl_addr = &priv->ata_regs->tf_control; + aio->data_addr = &priv->ata_regs->tf_data; + aio->error_addr = &priv->ata_regs->tf_features; + aio->feature_addr = &priv->ata_regs->tf_features; + aio->nsect_addr = &priv->ata_regs->tf_sec_count; + aio->lbal_addr = &priv->ata_regs->tf_sec_num; + aio->lbam_addr = &priv->ata_regs->tf_cyl_low; + aio->lbah_addr = &priv->ata_regs->tf_cyl_high; + aio->device_addr = &priv->ata_regs->tf_dev_head; + aio->status_addr = &priv->ata_regs->tf_command; + aio->command_addr = &priv->ata_regs->tf_command; + + ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); + + /* activate host */ + return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, + &mpc52xx_ata_sht); +} + +/* ======================================================================== */ +/* OF Platform driver */ +/* ======================================================================== */ + +static int mpc52xx_ata_probe(struct platform_device *op) +{ + unsigned int ipb_freq; + struct resource res_mem; + int ata_irq = 0; + struct mpc52xx_ata __iomem *ata_regs; + struct mpc52xx_ata_priv *priv = NULL; + int rv, task_irq; + int mwdma_mask = 0, udma_mask = 0; + const __be32 *prop; + int proplen; + struct bcom_task *dmatsk; + + /* Get ipb frequency */ + ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); + if (!ipb_freq) { + dev_err(&op->dev, "could not determine IPB bus frequency\n"); + return -ENODEV; + } + + /* Get device base address from device tree, request the region + * and ioremap it. 
*/ + rv = of_address_to_resource(op->dev.of_node, 0, &res_mem); + if (rv) { + dev_err(&op->dev, "could not determine device base address\n"); + return rv; + } + + if (!devm_request_mem_region(&op->dev, res_mem.start, + sizeof(*ata_regs), DRV_NAME)) { + dev_err(&op->dev, "error requesting register region\n"); + return -EBUSY; + } + + ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs)); + if (!ata_regs) { + dev_err(&op->dev, "error mapping device registers\n"); + return -ENOMEM; + } + + /* + * By default, all DMA modes are disabled for the MPC5200. Some + * boards don't have the required signals routed to make DMA work. + * Also, the MPC5200B has a silicon bug that causes data corruption + * with UDMA if it is used at the same time as the LocalPlus bus. + * + * Instead of trying to guess what modes are usable, check the + * ATA device tree node to find out what DMA modes work on the board. + * UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>" + * to the kernel boot parameters. + * + * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and + * UDMA modes 0, 1 and 2. + */ + prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen); + if ((prop) && (proplen >= 4)) + mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1); + prop = of_get_property(op->dev.of_node, "udma-mode", &proplen); + if ((prop) && (proplen >= 4)) + udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1); + + ata_irq = irq_of_parse_and_map(op->dev.of_node, 0); + if (ata_irq == NO_IRQ) { + dev_err(&op->dev, "error mapping irq\n"); + return -EINVAL; + } + + /* Prepare our private structure */ + priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC); + if (!priv) { + dev_err(&op->dev, "error allocating private structure\n"); + rv = -ENOMEM; + goto err1; + } + + priv->ipb_period = 1000000000 / (ipb_freq / 1000); + priv->ata_regs = ata_regs; + priv->ata_regs_pa = res_mem.start; + priv->ata_irq = ata_irq; + priv->csel = -1; + priv->mpc52xx_ata_dma_last_write = -1; + + if (ipb_freq/1000000 == 66) { + priv->mdmaspec = mdmaspec66; + priv->udmaspec = udmaspec66; + } else { + priv->mdmaspec = mdmaspec132; + priv->udmaspec = udmaspec132; + } + + /* Allocate a BestComm task for DMA */ + dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE); + if (!dmatsk) { + dev_err(&op->dev, "bestcomm initialization failed\n"); + rv = -ENOMEM; + goto err1; + } + + task_irq = bcom_get_task_irq(dmatsk); + rv = devm_request_irq(&op->dev, task_irq, &mpc52xx_ata_task_irq, 0, + "ATA task", priv); + if (rv) { + dev_err(&op->dev, "error requesting DMA IRQ\n"); + goto err2; + } + priv->dmatsk = dmatsk; + + /* Init the hw */ + rv = mpc52xx_ata_hw_init(priv); + if (rv) { + dev_err(&op->dev, "error initializing hardware\n"); + goto err2; + } + + /* Register ourselves to libata */ + rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start, + mwdma_mask, udma_mask); + if (rv) { + dev_err(&op->dev, "error registering with ATA layer\n"); + goto err2; + } + + return 0; + + err2: + irq_dispose_mapping(task_irq); + bcom_ata_release(dmatsk); + err1: + irq_dispose_mapping(ata_irq); + return rv; +} + +static int +mpc52xx_ata_remove(struct platform_device *op) +{ + struct ata_host *host = platform_get_drvdata(op); + struct mpc52xx_ata_priv *priv = host->private_data; + int task_irq; + + /* Deregister the ATA interface */ + ata_platform_remove_one(op); + + /* Clean up DMA */ + task_irq = bcom_get_task_irq(priv->dmatsk); + irq_dispose_mapping(task_irq); + bcom_ata_release(priv->dmatsk); + irq_dispose_mapping(priv->ata_irq); + + 
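+	/*
+	 * The remaining resources (register mapping, IRQ handlers, priv
+	 * structure) are devm-managed and are released by the driver core.
+	 */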
return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int +mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state) +{ + struct ata_host *host = platform_get_drvdata(op); + + return ata_host_suspend(host, state); +} + +static int +mpc52xx_ata_resume(struct platform_device *op) +{ + struct ata_host *host = platform_get_drvdata(op); + struct mpc52xx_ata_priv *priv = host->private_data; + int rv; + + rv = mpc52xx_ata_hw_init(priv); + if (rv) { + dev_err(host->dev, "error initializing hardware\n"); + return rv; + } + + ata_host_resume(host); + + return 0; +} +#endif + +static struct of_device_id mpc52xx_ata_of_match[] = { + { .compatible = "fsl,mpc5200-ata", }, + { .compatible = "mpc5200-ata", }, + {}, +}; + + +static struct platform_driver mpc52xx_ata_of_platform_driver = { + .probe = mpc52xx_ata_probe, + .remove = mpc52xx_ata_remove, +#ifdef CONFIG_PM_SLEEP + .suspend = mpc52xx_ata_suspend, + .resume = mpc52xx_ata_resume, +#endif + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = mpc52xx_ata_of_match, + }, +}; + +module_platform_driver(mpc52xx_ata_of_platform_driver); + +MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); +MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match); + diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 3c65393c1f0..202b4d60139 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c @@ -1,7 +1,7 @@ /* * pata_mpiix.c - Intel MPIIX PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> * * The MPIIX is different enough to the PIIX4 and friends that we give it * a separate driver. The old ide/pci code handles this by just not tuning @@ -15,7 +15,7 @@ * with PCI IDE and also that we do not disable the device when our driver is * unloaded (as it has many other functions). * - * The driver conciously keeps this logic internally to avoid pushing quirky + * The driver consciously keeps this logic internally to avoid pushing quirky * PATA history into the clean libata layer. * * Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA @@ -28,14 +28,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_mpiix" -#define DRV_VERSION "0.7.2" +#define DRV_VERSION "0.7.7" enum { IDETIM = 0x6C, /* IDE control register */ @@ -46,32 +45,16 @@ enum { SECONDARY = (1 << 14) }; -static int mpiix_pre_reset(struct ata_port *ap) +static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - static const struct pci_bits mpiix_enable_bits[] = { - { 0x6D, 1, 0x80, 0x80 }, - { 0x6F, 1, 0x80, 0x80 } - }; + static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; - if (!pci_test_config_bits(pdev, &mpiix_enable_bits[ap->port_no])) + if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * mpiix_error_handler - probe reset - * @ap: ATA port - * - * Perform the ATA probe and bus reset sequence plus specific handling - * for this hardware. The MPIIX has the enable bits in a different place - * to PIIX4 and friends. 
As a pure PIO device it has no cable detect - */ -static void mpiix_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** @@ -80,13 +63,13 @@ static void mpiix_error_handler(struct ata_port *ap) * @adev: ATA device * * Called to do the PIO mode setup. The MPIIX allows us to program the - * IORDY sample point (2-5 clocks), recovery 1-4 clocks and whether - * prefetching or iordy are used. + * IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether + * prefetching or IORDY are used. * * This would get very ugly because we can only program timing for one * device at a time, the other gets PIO0. Fortunately libata calls - * our qc_issue_prot command before a command is issued so we can - * flip the timings back and forth to reduce the pain. + * our qc_issue command before a command is issued so we can flip the + * timings back and forth to reduce the pain. */ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) @@ -103,18 +86,19 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) { 2, 3 }, }; pci_read_config_word(pdev, IDETIM, &idetim); - /* Mask the IORDY/TIME/PPE0 bank for this device */ + + /* Mask the IORDY/TIME/PPE for this device */ if (adev->class == ATA_DEV_ATA) - control |= PPE; /* PPE enable for disk */ + control |= PPE; /* Enable prefetch/posting for disk */ if (ata_pio_need_iordy(adev)) - control |= IORDY; /* IORDY */ - if (pio > 0) + control |= IORDY; + if (pio > 1) control |= FTIM; /* This drive is on the fast timing bank */ /* Mask out timing and clear both TIME bank selects */ idetim &= 0xCCEE; - idetim &= ~(0x07 << (2 * adev->devno)); - idetim |= (control << (2 * adev->devno)); + idetim &= ~(0x07 << (4 * adev->devno)); + idetim |= control << (4 * adev->devno); idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8); pci_write_config_word(pdev, IDETIM, idetim); @@ -125,17 +109,17 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) } /** - * mpiix_qc_issue_prot - command issue + * mpiix_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if - * neccessary. Our logic also clears TIME0/TIME1 for the other device so + * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. 
*/ -static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; @@ -148,64 +132,37 @@ static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc) if (adev->pio_mode && adev != ap->private_data) mpiix_set_piomode(ap, adev); - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } static struct scsi_host_template mpiix_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations mpiix_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_sff_port_ops, + .qc_issue = mpiix_qc_issue, + .cable_detect = ata_cable_40wire, .set_piomode = mpiix_set_piomode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = mpiix_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = mpiix_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .prereset = mpiix_pre_reset, + .sff_data_xfer = ata_sff_data_xfer32, }; static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* Single threaded by the PCI probe logic */ - static struct ata_probe_ent probe[2]; - static int printed_version; + struct ata_host *host; + struct ata_port *ap; + void __iomem *cmd_addr, *ctl_addr; u16 idetim; - int enabled; + int cmd, ctl, irq; + + ata_print_version_once(&dev->dev, DRV_VERSION); - if (!printed_version++) - dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); + host = ata_host_alloc(&dev->dev, 1); + if (!host) + return -ENOMEM; + ap = host->ports[0]; /* MPIIX has many functions which can be turned on or off according to other devices present. Make sure IDE is enabled before we try @@ -215,96 +172,67 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (!(idetim & ENABLED)) return -ENODEV; + /* See if it's primary or secondary channel... */ + if (!(idetim & SECONDARY)) { + cmd = 0x1F0; + ctl = 0x3F6; + irq = 14; + } else { + cmd = 0x170; + ctl = 0x376; + irq = 15; + } + + cmd_addr = devm_ioport_map(&dev->dev, cmd, 8); + ctl_addr = devm_ioport_map(&dev->dev, ctl, 1); + if (!cmd_addr || !ctl_addr) + return -ENOMEM; + + ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl); + /* We do our own plumbing to avoid leaking special cases for whacko ancient hardware into the core code. There are two issues to worry about. #1 The chip is a bridge so if in legacy mode and without BARs set fools the setup. 
#2 If you pci_disable_device the MPIIX your box goes castors up */ - INIT_LIST_HEAD(&probe[0].node); - probe[0].dev = pci_dev_to_dev(dev); - probe[0].port_ops = &mpiix_port_ops; - probe[0].sht = &mpiix_sht; - probe[0].pio_mask = 0x1F; - probe[0].irq = 14; - probe[0].irq_flags = SA_SHIRQ; - probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; - probe[0].n_ports = 1; - probe[0].port[0].cmd_addr = 0x1F0; - probe[0].port[0].ctl_addr = 0x3F6; - probe[0].port[0].altstatus_addr = 0x3F6; - - /* The secondary lurks at different addresses but is otherwise - the same beastie */ - - INIT_LIST_HEAD(&probe[1].node); - probe[1] = probe[0]; - probe[1].irq = 15; - probe[1].port[0].cmd_addr = 0x170; - probe[1].port[0].ctl_addr = 0x376; - probe[1].port[0].altstatus_addr = 0x376; - - /* Let libata fill in the port details */ - ata_std_ports(&probe[0].port[0]); - ata_std_ports(&probe[1].port[0]); + ap->ops = &mpiix_port_ops; + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_SLAVE_POSS; - /* Now add the port that is active */ - enabled = (idetim & SECONDARY) ? 1 : 0; + ap->ioaddr.cmd_addr = cmd_addr; + ap->ioaddr.ctl_addr = ctl_addr; + ap->ioaddr.altstatus_addr = ctl_addr; - if (ata_device_add(&probe[enabled])) - return 0; - return -ENODEV; -} - -/** - * mpiix_remove_one - device unload - * @pdev: PCI device being removed - * - * Handle an unplug/unload event for a PCI device. Unload the - * PCI driver but do not use the default handler as we *MUST NOT* - * disable the device as it has other functions. - */ - -static void __devexit mpiix_remove_one(struct pci_dev *pdev) -{ - struct device *dev = pci_dev_to_dev(pdev); - struct ata_host *host = dev_get_drvdata(dev); + /* Let libata fill in the port details */ + ata_sff_std_ports(&ap->ioaddr); - ata_host_remove(host); - dev_set_drvdata(dev, NULL); + /* activate host */ + return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED, + &mpiix_sht); } - - static const struct pci_device_id mpiix[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX), }, - { 0, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), }, + + { }, }; static struct pci_driver mpiix_pci_driver = { .name = DRV_NAME, .id_table = mpiix, .probe = mpiix_init_one, - .remove = mpiix_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init mpiix_init(void) -{ - return pci_register_driver(&mpiix_pci_driver); -} - - -static void __exit mpiix_exit(void) -{ - pci_unregister_driver(&mpiix_pci_driver); -} - +module_pci_driver(mpiix_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Intel MPIIX"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mpiix); MODULE_VERSION(DRV_VERSION); - -module_init(mpiix_init); -module_exit(mpiix_exit); diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 76eb9c90bee..0ea18331d46 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -1,13 +1,12 @@ /* * pata_netcell.c - Netcell PATA driver * - * (c) 2006 Red Hat <alan@redhat.com> + * (c) 2006 Red Hat */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -16,88 +15,28 @@ #include <linux/ata.h> #define DRV_NAME "pata_netcell" -#define DRV_VERSION "0.1.5" +#define DRV_VERSION "0.1.7" -/** - * netcell_probe_init - check for 40/80 pin - * @ap: Port - * - * Cables are handled by the RAID 
controller. Report 80 pin. - */ - -static int netcell_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); -} - -/** - * netcell_probe_reset - Probe specified port on PATA host controller - * @ap: Port to probe - * - * LOCKING: - * None (inherited from caller). - */ +/* No PIO or DMA methods needed for this device */ -static void netcell_error_handler(struct ata_port *ap) +static unsigned int netcell_read_id(struct ata_device *adev, + struct ata_taskfile *tf, u16 *id) { - return ata_bmdma_drive_eh(ap, netcell_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + unsigned int err_mask = ata_do_dev_read_id(adev, tf, id); + /* Firmware forgets to mark words 85-87 valid */ + if (err_mask == 0) + id[ATA_ID_CSF_DEFAULT] |= 0x4000; + return err_mask; } -/* No PIO or DMA methods needed for this device */ - static struct scsi_host_template netcell_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - /* Special handling needed if you have sector or LBA48 limits */ - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - /* Use standard CHS mapping rules */ - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations netcell_ops = { - .port_disable = ata_port_disable, - - /* Task file is PCI ATA format, use helpers */ - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = netcell_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - /* BMDMA handling is PCI ATA format, use helpers */ - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - /* IRQ-related hooks */ - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - /* Generic PATA PCI ATA helpers */ - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, +static struct ata_port_operations netcell_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_80wire, + .read_id = netcell_read_id, }; @@ -117,32 +56,34 @@ static const struct ata_port_operations netcell_ops = { static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - static struct ata_port_info info = { - .sht = &netcell_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, /* Actually we don't really care about these as the firmware deals with it */ - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* UDMA 133 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, /* UDMA 133 */ .port_ops = &netcell_ops, }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *port_info[] = { &info, NULL }; + int rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " 
DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; /* Any chip specific setup/optimisation/messages here */ - ata_pci_clear_simplex(pdev); + ata_pci_bmdma_clear_simplex(pdev); /* And let the library code do the work */ - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0); } static const struct pci_device_id netcell_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NETCELL, PCI_DEVICE_ID_REVOLUTION), }, + { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), }, + { } /* terminate list */ }; @@ -151,24 +92,16 @@ static struct pci_driver netcell_pci_driver = { .id_table = netcell_pci_tbl, .probe = netcell_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init netcell_init(void) -{ - return pci_register_driver(&netcell_pci_driver); -} - -static void __exit netcell_exit(void) -{ - pci_unregister_driver(&netcell_pci_driver); -} - -module_init(netcell_init); -module_exit(netcell_exit); +module_pci_driver(netcell_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, netcell_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c new file mode 100644 index 00000000000..efb272da856 --- /dev/null +++ b/drivers/ata/pata_ninja32.c @@ -0,0 +1,197 @@ +/* + * pata_ninja32.c - Ninja32 PATA for new ATA layer + * (C) 2007 Red Hat Inc + * + * Note: The controller like many controllers has shared timings for + * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back + * in the dma_stop function. Thus we actually don't need a set_dmamode + * method as the PIO method is always called and will set the right PIO + * timing parameters. + * + * The Ninja32 Cardbus is not a generic SFF controller. Instead it is + * laid out as follows off BAR 0. This is based upon Mark Lord's delkin + * driver and the extensive analysis done by the BSD developers, notably + * ITOH Yasufumi. + * + * Base + 0x00 IRQ Status + * Base + 0x01 IRQ control + * Base + 0x02 Chipset control + * Base + 0x03 Unknown + * Base + 0x04 VDMA and reset control + wait bits + * Base + 0x08 BMIMBA + * Base + 0x0C DMA Length + * Base + 0x10 Taskfile + * Base + 0x18 BMDMA Status ? + * Base + 0x1C + * Base + 0x1D Bus master control + * bit 0 = enable + * bit 1 = 0 write/1 read + * bit 2 = 1 sgtable + * bit 3 = go + * bit 4-6 wait bits + * bit 7 = done + * Base + 0x1E AltStatus + * Base + 0x1F timing register + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> + +#define DRV_NAME "pata_ninja32" +#define DRV_VERSION "0.1.5" + + +/** + * ninja32_set_piomode - set initial PIO mode data + * @ap: ATA interface + * @adev: ATA device + * + * Called to do the PIO mode setup. Our timing registers are shared + * but we want to set the PIO timing by default. 
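The bus master control byte at Base + 0x1D described above packs the whole engine interface into eight bits. A purely illustrative encoding of that documented layout (the macro names are invented for clarity; the driver itself only writes fixed values such as the 0x83 seen in ninja32_program() below):

	#define NJA32_BM_ENABLE		(1 << 0)	/* bit 0: bus master enable */
	#define NJA32_BM_READ		(1 << 1)	/* bit 1: 0 = write, 1 = read */
	#define NJA32_BM_SGTABLE	(1 << 2)	/* bit 2: use scatter/gather table */
	#define NJA32_BM_GO		(1 << 3)	/* bit 3: start the transfer */
	#define NJA32_BM_WAIT		(7 << 4)	/* bits 4-6: wait states */
	#define NJA32_BM_DONE		(1 << 7)	/* bit 7: transfer complete */

	/* e.g. a scatter/gather read would presumably be started with:
	 *	iowrite8(NJA32_BM_ENABLE | NJA32_BM_READ |
	 *		 NJA32_BM_SGTABLE | NJA32_BM_GO, base + 0x1d);
	 * and polled for NJA32_BM_DONE.
	 */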
+ */ + +static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + static u16 pio_timing[5] = { + 0xd6, 0x85, 0x44, 0x33, 0x13 + }; + iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0], + ap->ioaddr.bmdma_addr + 0x1f); + ap->private_data = adev; +} + + +static void ninja32_dev_select(struct ata_port *ap, unsigned int device) +{ + struct ata_device *adev = &ap->link.device[device]; + if (ap->private_data != adev) { + iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f); + ata_sff_dev_select(ap, device); + ninja32_set_piomode(ap, adev); + } +} + +static struct scsi_host_template ninja32_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations ninja32_port_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_dev_select = ninja32_dev_select, + .cable_detect = ata_cable_40wire, + .set_piomode = ninja32_set_piomode, + .sff_data_xfer = ata_sff_data_xfer32 +}; + +static void ninja32_program(void __iomem *base) +{ + iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ + iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ + iowrite8(0x01, base + 0x03); /* Unknown */ + iowrite8(0x20, base + 0x04); /* WAIT0 */ + iowrite8(0x8f, base + 0x05); /* Unknown */ + iowrite8(0xa4, base + 0x1c); /* Unknown */ + iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ +} + +static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct ata_host *host; + struct ata_port *ap; + void __iomem *base; + int rc; + + host = ata_host_alloc(&dev->dev, 1); + if (!host) + return -ENOMEM; + ap = host->ports[0]; + + /* Set up the PCI device */ + rc = pcim_enable_device(dev); + if (rc) + return rc; + rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(dev); + if (rc) + return rc; + + host->iomap = pcim_iomap_table(dev); + rc = pci_set_dma_mask(dev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK); + if (rc) + return rc; + pci_set_master(dev); + + /* Set up the register mappings. We use the I/O mapping as only the + older chips also have MMIO on BAR 1 */ + base = host->iomap[0]; + if (!base) + return -ENOMEM; + ap->ops = &ninja32_port_ops; + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + ap->ioaddr.cmd_addr = base + 0x10; + ap->ioaddr.ctl_addr = base + 0x1E; + ap->ioaddr.altstatus_addr = base + 0x1E; + ap->ioaddr.bmdma_addr = base; + ata_sff_std_ports(&ap->ioaddr); + ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; + + ninja32_program(base); + /* FIXME: Should we disable them at remove ? 
*/ + return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &ninja32_sht); +} + +#ifdef CONFIG_PM_SLEEP +static int ninja32_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + ninja32_program(host->iomap[0]); + ata_host_resume(host); + return 0; +} +#endif + +static const struct pci_device_id ninja32[] = { + { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { }, +}; + +static struct pci_driver ninja32_pci_driver = { + .name = DRV_NAME, + .id_table = ninja32, + .probe = ninja32_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ninja32_reinit_one, +#endif +}; + +module_pci_driver(ninja32_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("low-level driver for Ninja32 ATA"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, ninja32); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 2005a95f48f..200e1eb23a2 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c @@ -1,7 +1,6 @@ /* * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer * (C) 2006 Red Hat Inc - * Alan Cox <alan@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,24 +20,25 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_ns87410" -#define DRV_VERSION "0.4.2" +#define DRV_VERSION "0.4.6" /** * ns87410_pre_reset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * - * Set up cable type and use generic probe init + * Check enabled ports */ -static int ns87410_pre_reset(struct ata_port *ap) +static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits ns87410_enable_bits[] = { { 0x43, 1, 0x08, 0x08 }, @@ -47,22 +47,8 @@ static int ns87410_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * ns87410_error_handler - probe reset - * @ap: ATA port - * - * Perform the ATA probe and bus reset sequence plus specific handling - * for this hardware. The MPIIX has the enable bits in a different place - * to PIIX4 and friends. 
As a pure PIO device it has no cable detect - */ -static void ns87410_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** @@ -99,13 +85,13 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) idefr &= ~0x04; if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) { - dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode); + dev_err(&pdev->dev, "unknown mode %d\n", adev->pio_mode); return; } - at.active = FIT(at.active, 2, 16) - 2; - at.setup = FIT(at.setup, 1, 4) - 1; - at.recover = FIT(at.recover, 1, 12) - 1; + at.active = clamp_val(at.active, 2, 16) - 2; + at.setup = clamp_val(at.setup, 1, 4) - 1; + at.recover = clamp_val(at.recover, 1, 12) - 1; idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active]; @@ -117,15 +103,15 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) } /** - * ns87410_qc_issue_prot - command issue + * ns87410_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if - * neccessary. + * necessary. */ -static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; @@ -138,96 +124,53 @@ static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc) if (adev->pio_mode && adev != ap->private_data) ns87410_set_piomode(ap, adev); - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } static struct scsi_host_template ns87410_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations ns87410_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_sff_port_ops, + .qc_issue = ns87410_qc_issue, + .cable_detect = ata_cable_40wire, .set_piomode = ns87410_set_piomode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ns87410_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = ns87410_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .prereset = ns87410_pre_reset, }; static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &ns87410_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x0F, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO3, .port_ops = &ns87410_port_ops }; - static struct ata_port_info *port_info[2] = {&info, &info}; - 
return ata_pci_init_one(dev, port_info, 2); + const struct ata_port_info *ppi[] = { &info, NULL }; + return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL, 0); } static const struct pci_device_id ns87410[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410), }, - { 0, }, + { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), }, + + { }, }; static struct pci_driver ns87410_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = ns87410, .probe = ns87410_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init ns87410_init(void) -{ - return pci_register_driver(&ns87410_pci_driver); -} - - -static void __exit ns87410_exit(void) -{ - pci_unregister_driver(&ns87410_pci_driver); -} - +module_pci_driver(ns87410_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Nat Semi 87410"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ns87410); MODULE_VERSION(DRV_VERSION); - -module_init(ns87410_init); -module_exit(ns87410_exit); diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c new file mode 100644 index 00000000000..84c6b225b56 --- /dev/null +++ b/drivers/ata/pata_ns87415.c @@ -0,0 +1,422 @@ +/* + * pata_ns87415.c - NS87415 (non PARISC) PATA + * + * (C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk> + * + * This is a fairly generic MWDMA controller. It has some limitations + * as it requires timing reloads on PIO/DMA transitions but it is otherwise + * fairly well designed. + * + * This driver assumes the firmware has left the chip in a valid ST506 + * compliant state, either legacy IRQ 14/15 or native INTA shared. You + * may need to add platform code if your system fails to do this. + * + * The same cell appears in the 87560 controller used by some PARISC + * systems. This has its own special mountain of errata. + * + * TODO: + * Test PARISC SuperIO + * Get someone to test on SPARC + * Implement lazy pio/dma switching for better performance + * 8bit shared timing. + * See if we need to kill the FIFO for ATAPI + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/ata.h> + +#define DRV_NAME "pata_ns87415" +#define DRV_VERSION "0.0.1" + +/** + * ns87415_set_mode - Initialize host controller mode timings + * @ap: Port whose timings we are configuring + * @adev: Device whose timings we are configuring + * @mode: Mode to set + * + * Program the mode registers for this controller, channel and + * device. Because the chip is quite an old design we have to do this + * for PIO/DMA switches. + * + * LOCKING: + * None (inherited from caller). 
+ */
+
+static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	int unit = 2 * ap->port_no + adev->devno;
+	int timing = 0x44 + 2 * unit;
+	unsigned long T = 1000000000 / 33333;	/* PCI clocks */
+	struct ata_timing t;
+	u16 clocking;
+	u8 iordy;
+	u8 status;
+
+	/* Timing register format is 17 - low nybble read timing with
+	   the high nybble being 16 - x for recovery time in PCI clocks */
+
+	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
+
+	clocking = 17 - clamp_val(t.active, 2, 17);
+	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
+	/* Use the same timing for read and write bytes */
+	clocking |= (clocking << 8);
+	pci_write_config_word(dev, timing, clocking);
+
+	/* Set the IORDY enable versus DMA enable on or off properly */
+	pci_read_config_byte(dev, 0x42, &iordy);
+	iordy &= ~(1 << (4 + unit));
+	if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
+		iordy |= (1 << (4 + unit));
+
+	/* Paranoia: We shouldn't ever get here with busy write buffers
+	   but if so wait */
+
+	pci_read_config_byte(dev, 0x43, &status);
+	while (status & 0x03) {
+		udelay(1);
+		pci_read_config_byte(dev, 0x43, &status);
+	}
+	/* Flip the IORDY/DMA bits now we are sure the write buffers are
+	   clear */
+	pci_write_config_byte(dev, 0x42, iordy);
+
+	/* TODO: Set byte 54 command timing to the best 8bit
+	   mode shared by all four devices */
+}
+
+/**
+ *	ns87415_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	ns87415_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	ns87415_bmdma_setup - Set up DMA
+ *	@qc: Command block
+ *
+ *	Set up for bus mastering DMA. We have to do this ourselves
+ *	rather than use the helper due to a chip erratum.
+ */
+
+static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	/* Due to an erratum we need to write these bits to the wrong
+	   place - which does save us an I/O bizarrely */
+	dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ns87415_bmdma_start - Begin DMA transfer
+ *	@qc: Command block
+ *
+ *	Switch the timings for the chip and set up for a DMA transfer
+ *	before the DMA burst begins.
+ *
+ *	FIXME: We should do lazy switching on bmdma_start versus
+ *	ata_pio_data_xfer for better performance.
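The clocking word built in ns87415_set_mode() above is compact enough to restate on its own: the low nybble is 17 minus the active clocks, the next nybble is 16 minus the recovery clocks, and the resulting byte is duplicated so reads and writes share one timing. A standalone user-space sketch of just that arithmetic, with illustrative numbers:

	#include <stdint.h>

	static uint16_t ns87415_clocking(unsigned int active, unsigned int recover)
	{
		uint16_t clocking;

		/* same bounds as the clamp_val() calls in the driver */
		if (active < 2)
			active = 2;
		if (active > 17)
			active = 17;
		if (recover < 1)
			recover = 1;
		if (recover > 16)
			recover = 16;

		clocking = 17 - active;			/* low nybble: read timing */
		clocking |= (16 - recover) << 4;	/* next nybble: recovery */
		return clocking | (clocking << 8);	/* same byte for writes */
	}

For example, active = 6 and recover = 4 clocks give 0xB | 0xC0 = 0xCB, so 0xCBCB is written to the per-unit timing register.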
+ */
+
+static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
+{
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	ns87415_bmdma_stop - End DMA transfer
+ *	@qc: Command block
+ *
+ *	End DMA mode and switch the controller back into PIO mode
+ */
+
+static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+/**
+ *	ns87415_irq_clear - Clear interrupt
+ *	@ap: Channel to clear
+ *
+ *	Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the
+ *	error bits) are reset by writing to register 00 or 08.
+ */
+
+static void ns87415_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+	iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
+			mmio + ATA_DMA_CMD);
+}
+
+/**
+ *	ns87415_check_atapi_dma - ATAPI DMA filter
+ *	@qc: Command block
+ *
+ *	Disable ATAPI DMA (for now). We may be able to do DMA if we
+ *	kill the prefetching. This isn't clear.
+ */
+
+static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return -EOPNOTSUPP;
+}
+
+#if defined(CONFIG_SUPERIO)
+
+/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
+ * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
+ * which use the integrated NS87514 cell for CD-ROM support.
+ * i.e. we have to support CD-ROM installs.
+ * See drivers/parisc/superio.c for more gory details.
+ *
+ * Workarounds taken from drivers/ide/pci/ns87415.c
+ */
+
+#include <asm/superio.h>
+
+#define SUPERIO_IDE_MAX_RETRIES 25
+
+/**
+ *	ns87560_read_buggy	-	workaround buggy Super I/O chip
+ *	@port: Port to read
+ *
+ *	Work around chipset problems in the 87560 SuperIO chip
+ */
+
+static u8 ns87560_read_buggy(void __iomem *port)
+{
+	u8 tmp;
+	int retries = SUPERIO_IDE_MAX_RETRIES;
+	do {
+		tmp = ioread8(port);
+		if (tmp != 0)
+			return tmp;
+		udelay(50);
+	} while(retries-- > 0);
+	return tmp;
+}
+
+/**
+ *	ns87560_check_status
+ *	@ap: channel to check
+ *
+ *	Return the status of the channel working around the
+ *	87560 flaws.
+ */
+
+static u8 ns87560_check_status(struct ata_port *ap)
+{
+	return ns87560_read_buggy(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ns87560_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf. Work around the 87560 bugs.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ns87560_check_status(ap);
+	tf->feature = ioread8(ioaddr->error_addr);
+	tf->nsect = ioread8(ioaddr->nsect_addr);
+	tf->lbal = ioread8(ioaddr->lbal_addr);
+	tf->lbam = ioread8(ioaddr->lbam_addr);
+	tf->lbah = ioread8(ioaddr->lbah_addr);
+	tf->device = ns87560_read_buggy(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ioread8(ioaddr->error_addr);
+		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/**
+ *	ns87560_bmdma_status
+ *	@ap: channel to check
+ *
+ *	Return the DMA status of the channel working around the
+ *	87560 flaws.
+ */ + +static u8 ns87560_bmdma_status(struct ata_port *ap) +{ + return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); +} +#endif /* 87560 SuperIO Support */ + +static struct ata_port_operations ns87415_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma = ns87415_check_atapi_dma, + .bmdma_setup = ns87415_bmdma_setup, + .bmdma_start = ns87415_bmdma_start, + .bmdma_stop = ns87415_bmdma_stop, + .sff_irq_clear = ns87415_irq_clear, + + .cable_detect = ata_cable_40wire, + .set_piomode = ns87415_set_piomode, +}; + +#if defined(CONFIG_SUPERIO) +static struct ata_port_operations ns87560_pata_ops = { + .inherits = &ns87415_pata_ops, + .sff_tf_read = ns87560_tf_read, + .sff_check_status = ns87560_check_status, + .bmdma_status = ns87560_bmdma_status, +}; +#endif + +static struct scsi_host_template ns87415_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static void ns87415_fixup(struct pci_dev *pdev) +{ + /* Select 512 byte sectors */ + pci_write_config_byte(pdev, 0x55, 0xEE); + /* Select PIO0 8bit clocking */ + pci_write_config_byte(pdev, 0x54, 0xB7); +} + +/** + * ns87415_init_one - Register 87415 ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in ns87415_pci_tbl matching with @pdev + * + * Called from kernel PCI layer. We probe for combined mode (sigh), + * and then hand over control to libata, for it to do the rest. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. + */ + +static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .port_ops = &ns87415_pata_ops, + }; + const struct ata_port_info *ppi[] = { &info, NULL }; + int rc; +#if defined(CONFIG_SUPERIO) + static const struct ata_port_info info87560 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .port_ops = &ns87560_pata_ops, + }; + + if (PCI_SLOT(pdev->devfn) == 0x0E) + ppi[0] = &info87560; +#endif + ata_print_version_once(&pdev->dev, DRV_VERSION); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + ns87415_fixup(pdev); + + return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0); +} + +static const struct pci_device_id ns87415_pci_tbl[] = { + { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), }, + + { } /* terminate list */ +}; + +#ifdef CONFIG_PM_SLEEP +static int ns87415_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + ns87415_fixup(pdev); + + ata_host_resume(host); + return 0; +} +#endif + +static struct pci_driver ns87415_pci_driver = { + .name = DRV_NAME, + .id_table = ns87415_pci_tbl, + .probe = ns87415_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ns87415_reinit_one, +#endif +}; + +module_pci_driver(ns87415_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c new file mode 100644 index 00000000000..2a97d3a531e --- /dev/null +++ b/drivers/ata/pata_octeon_cf.c @@ -0,0 +1,1080 @@ +/* + * Driver for the Octeon bootbus compact flash. 
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 - 2012 Cavium Inc.
+ * Copyright (C) 2008 Wind River Systems
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+
+/*
+ * The Octeon bootbus compact flash interface is connected in at least
+ * 3 different configurations on various evaluation boards:
+ *
+ * -- 8 bits no irq, no DMA
+ * -- 16 bits no irq, no DMA
+ * -- 16 bits True IDE mode with DMA, but no irq.
+ *
+ * In the last case the DMA engine can generate an interrupt when the
+ * transfer is complete. For the first two cases only PIO is supported.
+ *
+ */
+
+#define DRV_NAME "pata_octeon_cf"
+#define DRV_VERSION "2.2"
+
+/* Poll interval in ns. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
+
+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50
+
+struct octeon_cf_port {
+	struct hrtimer delayed_finish;
+	struct ata_port *ap;
+	int dma_finished;
+	void *c0;
+	unsigned int cs0;
+	unsigned int cs1;
+	bool is_true_ide;
+	u64 dma_base;
+};
+
+static struct scsi_host_template octeon_cf_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+		 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
+/**
+ * Convert nanosecond based time to setting used in the
+ * boot bus timing register, based on timing multiple
+ */
+static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
+{
+	unsigned int val;
+
+	/*
+	 * Compute # of eclock periods to get desired duration in
+	 * nanoseconds.
+	 */
+	val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
+			   1000 * tim_mult);
+
+	return val;
+}
+
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
+{
+	union cvmx_mio_boot_reg_cfgx reg_cfg;
+	unsigned int tim_mult;
+
+	switch (multiplier) {
+	case 8:
+		tim_mult = 3;
+		break;
+	case 4:
+		tim_mult = 0;
+		break;
+	case 2:
+		tim_mult = 2;
+		break;
+	default:
+		tim_mult = 1;
+		break;
+	}
+
+	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
+	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
+	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
+	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
+	reg_cfg.s.we_ext = 0;	/* No write enable extension */
+	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
+	reg_cfg.s.en = 1;	/* Enable this region */
+	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
+	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
+}
+
+/**
+ * Called after libata determines the needed PIO mode. This
+ * function programs the Octeon bootbus regions to support the
+ * timing requirements of the PIO mode.
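To make the ns_to_tim_reg() conversion above concrete: with a hypothetical 800 MHz I/O clock and a timing multiple of 4, a 60 ns interval needs DIV_ROUND_UP(60 * 800, 4000) = 12 eclock ticks. The same arithmetic as a self-contained sketch:

	#include <stdint.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

	static unsigned int ns_to_ticks(uint64_t clock_hz, unsigned int tim_mult,
					unsigned int nsecs)
	{
		/* eclock periods needed to cover nsecs, rounded up */
		return DIV_ROUND_UP(nsecs * (unsigned int)(clock_hz / 1000000),
				    1000 * tim_mult);
	}

	/* ns_to_ticks(800000000, 4, 60) == 12 */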
+ *
+ * @ap: ATA port information
+ * @dev: ATA device
+ */
+static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_port *cf_port = ap->private_data;
+	union cvmx_mio_boot_reg_timx reg_tim;
+	int T;
+	struct ata_timing timing;
+
+	unsigned int div;
+	int use_iordy;
+	int trh;
+	int pause;
+	/* These names are timing parameters from the ATA spec */
+	int t1;
+	int t2;
+	int t2i;
+
+	/*
+	 * A divisor value of four will overflow the timing fields at
+	 * clock rates greater than 800MHz
+	 */
+	if (octeon_get_io_clock_rate() <= 800000000)
+		div = 4;
+	else
+		div = 8;
+	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
+
+	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
+		BUG();
+
+	t1 = timing.setup;
+	if (t1)
+		t1--;
+	t2 = timing.active;
+	if (t2)
+		t2--;
+	t2i = timing.act8b;
+	if (t2i)
+		t2i--;
+
+	trh = ns_to_tim_reg(div, 20);
+	if (trh)
+		trh--;
+
+	pause = (int)timing.cycle - (int)timing.active -
+		(int)timing.setup - trh;
+	if (pause < 0)
+		pause = 0;
+	if (pause)
+		pause--;
+
+	octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+	if (cf_port->is_true_ide)
+		/* True IDE mode, program both chip selects. */
+		octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
+
+
+	use_iordy = ata_pio_need_iordy(dev);
+
+	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
+	/* Disable page mode */
+	reg_tim.s.pagem = 0;
+	/* Enable dynamic timing */
+	reg_tim.s.waitm = use_iordy;
+	/* Pages are disabled */
+	reg_tim.s.pages = 0;
+	/* We don't use multiplexed address mode */
+	reg_tim.s.ale = 0;
+	/* Not used */
+	reg_tim.s.page = 0;
+	/* Time after IORDY to continue to assert the data */
+	reg_tim.s.wait = 0;
+	/* Time to wait to complete the cycle. */
+	reg_tim.s.pause = pause;
+	/* How long to hold after a write to de-assert CE. */
+	reg_tim.s.wr_hld = trh;
+	/* How long to wait after a read to de-assert CE. */
+	reg_tim.s.rd_hld = trh;
+	/* How long write enable is asserted */
+	reg_tim.s.we = t2;
+	/* How long read enable is asserted */
+	reg_tim.s.oe = t2;
+	/* Time after CE that read/write starts */
+	reg_tim.s.ce = ns_to_tim_reg(div, 5);
+	/* Time before CE that address is valid */
+	reg_tim.s.adr = 0;
+
+	/* Program the bootbus region timing for the data port chip select. */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+	if (cf_port->is_true_ide)
+		/* True IDE mode, program both chip selects. */
+		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+			       reg_tim.u64);
+}
+
+static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_port *cf_port = ap->private_data;
+	union cvmx_mio_boot_pin_defs pin_defs;
+	union cvmx_mio_boot_dma_timx dma_tim;
+	unsigned int oe_a;
+	unsigned int oe_n;
+	unsigned int dma_ackh;
+	unsigned int dma_arq;
+	unsigned int pause;
+	unsigned int T0, Tkr, Td;
+	unsigned int tim_mult;
+	int c;
+
+	const struct ata_timing *timing;
+
+	timing = ata_timing_find_mode(dev->dma_mode);
+	T0 = timing->cycle;
+	Td = timing->active;
+	Tkr = timing->recover;
+	dma_ackh = timing->dmack_hold;
+
+	dma_tim.u64 = 0;
+	/* dma_tim.s.tim_mult = 0 --> 4x */
+	tim_mult = 4;
+
+	/* not spec'ed, value in eclocks, not affected by tim_mult */
+	dma_arq = 8;
+	pause = 25 - dma_arq * 1000 /
+		(octeon_get_io_clock_rate() / 1000000); /* Tz */
+
+	oe_a = Td;
+	/* Tkr from cf spec, lengthened to meet T0 */
+	oe_n = max(T0 - oe_a, Tkr);
+
+	pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+	/* DMA channel number.
*/ + c = (cf_port->dma_base & 8) >> 3; + + /* Invert the polarity if the default is 0*/ + dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1; + + dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n); + dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a); + + /* + * This is tI, C.F. spec. says 0, but Sony CF card requires + * more, we use 20 nS. + */ + dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20); + dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); + + dma_tim.s.dmarq = dma_arq; + dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause); + + dma_tim.s.rd_dly = 0; /* Sample right on edge */ + + /* writes only */ + dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n); + dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a); + + pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, + ns_to_tim_reg(tim_mult, 60)); + pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n", + dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, + dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause); + + cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64); +} + +/** + * Handle an 8 bit I/O request. + * + * @dev: Device to access + * @buffer: Data buffer + * @buflen: Length of the buffer. + * @rw: True to write. + */ +static unsigned int octeon_cf_data_xfer8(struct ata_device *dev, + unsigned char *buffer, + unsigned int buflen, + int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned long words; + int count; + + words = buflen; + if (rw) { + count = 16; + while (words--) { + iowrite8(*buffer, data_addr); + buffer++; + /* + * Every 16 writes do a read so the bootbus + * FIFO doesn't fill up. + */ + if (--count == 0) { + ioread8(ap->ioaddr.altstatus_addr); + count = 16; + } + } + } else { + ioread8_rep(data_addr, buffer, words); + } + return buflen; +} + +/** + * Handle a 16 bit I/O request. + * + * @dev: Device to access + * @buffer: Data buffer + * @buflen: Length of the buffer. + * @rw: True to write. + */ +static unsigned int octeon_cf_data_xfer16(struct ata_device *dev, + unsigned char *buffer, + unsigned int buflen, + int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned long words; + int count; + + words = buflen / 2; + if (rw) { + count = 16; + while (words--) { + iowrite16(*(uint16_t *)buffer, data_addr); + buffer += sizeof(uint16_t); + /* + * Every 16 writes do a read so the bootbus + * FIFO doesn't fill up. + */ + if (--count == 0) { + ioread8(ap->ioaddr.altstatus_addr); + count = 16; + } + } + } else { + while (words--) { + *(uint16_t *)buffer = ioread16(data_addr); + buffer += sizeof(uint16_t); + } + } + /* Transfer trailing 1 byte, if any. */ + if (unlikely(buflen & 0x01)) { + __le16 align_buf[1] = { 0 }; + + if (rw == READ) { + align_buf[0] = cpu_to_le16(ioread16(data_addr)); + memcpy(buffer, align_buf, 1); + } else { + memcpy(align_buf, buffer, 1); + iowrite16(le16_to_cpu(align_buf[0]), data_addr); + } + words++; + } + return buflen; +} + +/** + * Read the taskfile for 16bit non-True IDE only. + */ +static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) +{ + u16 blob; + /* The base of the registers is at ioaddr.data_addr. 
*/ + void __iomem *base = ap->ioaddr.data_addr; + + blob = __raw_readw(base + 0xc); + tf->feature = blob >> 8; + + blob = __raw_readw(base + 2); + tf->nsect = blob & 0xff; + tf->lbal = blob >> 8; + + blob = __raw_readw(base + 4); + tf->lbam = blob & 0xff; + tf->lbah = blob >> 8; + + blob = __raw_readw(base + 6); + tf->device = blob & 0xff; + tf->command = blob >> 8; + + if (tf->flags & ATA_TFLAG_LBA48) { + if (likely(ap->ioaddr.ctl_addr)) { + iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); + + blob = __raw_readw(base + 0xc); + tf->hob_feature = blob >> 8; + + blob = __raw_readw(base + 2); + tf->hob_nsect = blob & 0xff; + tf->hob_lbal = blob >> 8; + + blob = __raw_readw(base + 4); + tf->hob_lbam = blob & 0xff; + tf->hob_lbah = blob >> 8; + + iowrite8(tf->ctl, ap->ioaddr.ctl_addr); + ap->last_ctl = tf->ctl; + } else { + WARN_ON(1); + } + } +} + +static u8 octeon_cf_check_status16(struct ata_port *ap) +{ + u16 blob; + void __iomem *base = ap->ioaddr.data_addr; + + blob = __raw_readw(base + 6); + return blob >> 8; +} + +static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + void __iomem *base = ap->ioaddr.data_addr; + int rc; + u8 err; + + DPRINTK("about to softreset\n"); + __raw_writew(ap->ctl, base + 0xe); + udelay(20); + __raw_writew(ap->ctl | ATA_SRST, base + 0xe); + udelay(20); + __raw_writew(ap->ctl, base + 0xe); + + rc = ata_sff_wait_after_reset(link, 1, deadline); + if (rc) { + ata_link_err(link, "SRST failed (errno=%d)\n", rc); + return rc; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err); + DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); + return 0; +} + +/** + * Load the taskfile for 16bit non-True IDE only. The device_addr is + * not loaded, we do this as part of octeon_cf_exec_command16. + */ +static void octeon_cf_tf_load16(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + /* The base of the registers is at ioaddr.data_addr. */ + void __iomem *base = ap->ioaddr.data_addr; + + if (tf->ctl != ap->last_ctl) { + iowrite8(tf->ctl, ap->ioaddr.ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + __raw_writew(tf->hob_feature << 8, base + 0xc); + __raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2); + __raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + if (is_addr) { + __raw_writew(tf->feature << 8, base + 0xc); + __raw_writew(tf->nsect | tf->lbal << 8, base + 2); + __raw_writew(tf->lbam | tf->lbah << 8, base + 4); + VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + ata_wait_idle(ap); +} + + +static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device) +{ +/* There is only one device, do nothing. */ + return; +} + +/* + * Issue ATA command to host controller. The device_addr is also sent + * as it must be written in a combined write with the command. + */ +static void octeon_cf_exec_command16(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + /* The base of the registers is at ioaddr.data_addr. 
*/ + void __iomem *base = ap->ioaddr.data_addr; + u16 blob; + + if (tf->flags & ATA_TFLAG_DEVICE) { + VPRINTK("device 0x%X\n", tf->device); + blob = tf->device; + } else { + blob = 0; + } + + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); + blob |= (tf->command << 8); + __raw_writew(blob, base + 6); + + + ata_wait_idle(ap); +} + +static void octeon_cf_ata_port_noaction(struct ata_port *ap) +{ +} + +static void octeon_cf_dma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct octeon_cf_port *cf_port; + + cf_port = ap->private_data; + DPRINTK("ENTER\n"); + /* issue r/w command */ + qc->cursg = qc->sg; + cf_port->dma_finished = 0; + ap->ops->sff_exec_command(ap, &qc->tf); + DPRINTK("EXIT\n"); +} + +/** + * Start a DMA transfer that was already setup + * + * @qc: Information about the DMA + */ +static void octeon_cf_dma_start(struct ata_queued_cmd *qc) +{ + struct octeon_cf_port *cf_port = qc->ap->private_data; + union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg; + union cvmx_mio_boot_dma_intx mio_boot_dma_int; + struct scatterlist *sg; + + VPRINTK("%d scatterlists\n", qc->n_elem); + + /* Get the scatter list entry we need to DMA into */ + sg = qc->cursg; + BUG_ON(!sg); + + /* + * Clear the DMA complete status. + */ + mio_boot_dma_int.u64 = 0; + mio_boot_dma_int.s.done = 1; + cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64); + + /* Enable the interrupt. */ + cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64); + + /* Set the direction of the DMA */ + mio_boot_dma_cfg.u64 = 0; +#ifdef __LITTLE_ENDIAN + mio_boot_dma_cfg.s.endian = 1; +#endif + mio_boot_dma_cfg.s.en = 1; + mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0); + + /* + * Don't stop the DMA if the device deasserts DMARQ. Many + * compact flashes deassert DMARQ for a short time between + * sectors. Instead of stopping and restarting the DMA, we'll + * let the hardware do it. If the DMA is really stopped early + * due to an error condition, a later timeout will force us to + * stop. + */ + mio_boot_dma_cfg.s.clr = 0; + + /* Size is specified in 16bit words and minus one notation */ + mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1; + + /* We need to swap the high and low bytes of every 16 bits */ + mio_boot_dma_cfg.s.swap8 = 1; + + mio_boot_dma_cfg.s.adr = sg_dma_address(sg); + + VPRINTK("%s %d bytes address=%p\n", + (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length, + (void *)(unsigned long)mio_boot_dma_cfg.s.adr); + + cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64); +} + +/** + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static unsigned int octeon_cf_dma_finished(struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + struct octeon_cf_port *cf_port = ap->private_data; + union cvmx_mio_boot_dma_cfgx dma_cfg; + union cvmx_mio_boot_dma_intx dma_int; + u8 status; + + VPRINTK("ata%u: protocol %d task_state %d\n", + ap->print_id, qc->tf.protocol, ap->hsm_task_state); + + + if (ap->hsm_task_state != HSM_ST_LAST) + return 0; + + dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); + if (dma_cfg.s.size != 0xfffff) { + /* Error, the transfer was not complete. */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + + /* Stop and clear the dma engine. */ + dma_cfg.u64 = 0; + dma_cfg.s.size = -1; + cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); + + /* Disable the interrupt. 
*/ + dma_int.u64 = 0; + cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64); + + /* Clear the DMA complete status */ + dma_int.s.done = 1; + cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); + + status = ap->ops->sff_check_status(ap); + + ata_sff_hsm_move(ap, qc, status, 0); + + if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA)) + ata_ehi_push_desc(ehi, "DMA stat 0x%x", status); + + return 1; +} + +/* + * Check if any queued commands have more DMAs, if so start the next + * transfer, else do end of transfer handling. + */ +static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct octeon_cf_port *cf_port; + int i; + unsigned int handled = 0; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + DPRINTK("ENTER\n"); + for (i = 0; i < host->n_ports; i++) { + u8 status; + struct ata_port *ap; + struct ata_queued_cmd *qc; + union cvmx_mio_boot_dma_intx dma_int; + union cvmx_mio_boot_dma_cfgx dma_cfg; + + ap = host->ports[i]; + cf_port = ap->private_data; + + dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT); + dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + + if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING)) + continue; + + if (dma_int.s.done && !dma_cfg.s.en) { + if (!sg_is_last(qc->cursg)) { + qc->cursg = sg_next(qc->cursg); + handled = 1; + octeon_cf_dma_start(qc); + continue; + } else { + cf_port->dma_finished = 1; + } + } + if (!cf_port->dma_finished) + continue; + status = ioread8(ap->ioaddr.altstatus_addr); + if (status & (ATA_BUSY | ATA_DRQ)) { + /* + * We are busy, try to handle it later. This + * is the DMA finished interrupt, and it could + * take a little while for the card to be + * ready for more commands. + */ + /* Clear DMA irq. */ + dma_int.u64 = 0; + dma_int.s.done = 1; + cvmx_write_csr(cf_port->dma_base + DMA_INT, + dma_int.u64); + hrtimer_start_range_ns(&cf_port->delayed_finish, + ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL), + OCTEON_CF_BUSY_POLL_INTERVAL / 5, + HRTIMER_MODE_REL); + handled = 1; + } else { + handled |= octeon_cf_dma_finished(ap, qc); + } + } + spin_unlock_irqrestore(&host->lock, flags); + DPRINTK("EXIT\n"); + return IRQ_RETVAL(handled); +} + +static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt) +{ + struct octeon_cf_port *cf_port = container_of(hrt, + struct octeon_cf_port, + delayed_finish); + struct ata_port *ap = cf_port->ap; + struct ata_host *host = ap->host; + struct ata_queued_cmd *qc; + unsigned long flags; + u8 status; + enum hrtimer_restart rv = HRTIMER_NORESTART; + + spin_lock_irqsave(&host->lock, flags); + + /* + * If the port is not waiting for completion, it must have + * handled it previously. The hsm_task_state is + * protected by host->lock. + */ + if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished) + goto out; + + status = ioread8(ap->ioaddr.altstatus_addr); + if (status & (ATA_BUSY | ATA_DRQ)) { + /* Still busy, try again. */ + hrtimer_forward_now(hrt, + ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL)); + rv = HRTIMER_RESTART; + goto out; + } + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) + octeon_cf_dma_finished(ap, qc); +out: + spin_unlock_irqrestore(&host->lock, flags); + return rv; +} + +static void octeon_cf_dev_config(struct ata_device *dev) +{ + /* + * A maximum of 2^20 - 1 16 bit transfers are possible with + * the bootbus DMA. 
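The 4095-sector cap applied just below follows from this limit, assuming the usual 512-byte sector (256 sixteen-bit words); a quick arithmetic check:

	#include <assert.h>

	int main(void)
	{
		/* 512-byte sectors assumed: 256 sixteen-bit words per sector */
		assert(4095u * 256u <= (1u << 20) - 1);	/* 1048320 <= 1048575 */
		assert(4096u * 256u >  (1u << 20) - 1);	/* one more sector overflows */
		return 0;
	}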
So we need to throttle max_sectors to + * (2^12 - 1 == 4095) to assure that this can never happen. + */ + dev->max_sectors = min(dev->max_sectors, 4095U); +} + +/* + * We don't do ATAPI DMA so return 0. + */ +static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) +{ + return 0; +} + +static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + octeon_cf_dma_setup(qc); /* set up dma */ + octeon_cf_dma_start(qc); /* initiate dma */ + ap->hsm_task_state = HSM_ST_LAST; + break; + + case ATAPI_PROT_DMA: + dev_err(ap->dev, "Error, ATAPI not supported\n"); + BUG(); + + default: + return ata_sff_qc_issue(qc); + } + + return 0; +} + +static struct ata_port_operations octeon_cf_ops = { + .inherits = &ata_sff_port_ops, + .check_atapi_dma = octeon_cf_check_atapi_dma, + .qc_prep = ata_noop_qc_prep, + .qc_issue = octeon_cf_qc_issue, + .sff_dev_select = octeon_cf_dev_select, + .sff_irq_on = octeon_cf_ata_port_noaction, + .sff_irq_clear = octeon_cf_ata_port_noaction, + .cable_detect = ata_cable_40wire, + .set_piomode = octeon_cf_set_piomode, + .set_dmamode = octeon_cf_set_dmamode, + .dev_config = octeon_cf_dev_config, +}; + +static int octeon_cf_probe(struct platform_device *pdev) +{ + struct resource *res_cs0, *res_cs1; + + bool is_16bit; + const __be32 *cs_num; + struct property *reg_prop; + int n_addr, n_size, reg_len; + struct device_node *node; + const void *prop; + void __iomem *cs0; + void __iomem *cs1 = NULL; + struct ata_host *host; + struct ata_port *ap; + int irq = 0; + irq_handler_t irq_handler = NULL; + void __iomem *base; + struct octeon_cf_port *cf_port; + int rv = -ENOMEM; + + + node = pdev->dev.of_node; + if (node == NULL) + return -EINVAL; + + cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL); + if (!cf_port) + return -ENOMEM; + + cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL); + + prop = of_get_property(node, "cavium,bus-width", NULL); + if (prop) + is_16bit = (be32_to_cpup(prop) == 16); + else + is_16bit = false; + + n_addr = of_n_addr_cells(node); + n_size = of_n_size_cells(node); + + reg_prop = of_find_property(node, "reg", ®_len); + if (!reg_prop || reg_len < sizeof(__be32)) + return -EINVAL; + + cs_num = reg_prop->value; + cf_port->cs0 = be32_to_cpup(cs_num); + + if (cf_port->is_true_ide) { + struct device_node *dma_node; + dma_node = of_parse_phandle(node, + "cavium,dma-engine-handle", 0); + if (dma_node) { + struct platform_device *dma_dev; + dma_dev = of_find_device_by_node(dma_node); + if (dma_dev) { + struct resource *res_dma; + int i; + res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); + if (!res_dma) { + of_node_put(dma_node); + return -EINVAL; + } + cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, + resource_size(res_dma)); + if (!cf_port->dma_base) { + of_node_put(dma_node); + return -EINVAL; + } + + irq_handler = octeon_cf_interrupt; + i = platform_get_irq(dma_dev, 0); + if (i > 0) + irq = i; + } + of_node_put(dma_node); + } + res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_cs1) + return -EINVAL; + + cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, + resource_size(res_cs1)); + if (!cs1) + return rv; + + if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) + return -EINVAL; + + cs_num += n_addr + n_size; + cf_port->cs1 = be32_to_cpup(cs_num); + } + + 
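
The chip-select parsing above walks the raw "reg" property by cell counts: each entry is n_addr address cells followed by n_size size cells, and only the first address cell (the chip-select number) matters here. A standalone sketch with a made-up two-entry property and hypothetical cell counts, using ntohl() in place of be32_to_cpup():

    #include <assert.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl()/htonl() as stand-ins for be32 helpers */

    int main(void)
    {
        /* Hypothetical "reg" property: two entries of
         * <chip-select offset size> with #address-cells = 2 and
         * #size-cells = 1, stored big-endian as in a flat tree. */
        uint32_t reg[] = {
            htonl(0), htonl(0x800), htonl(0x100),   /* cs0 entry */
            htonl(1), htonl(0x000), htonl(0x100),   /* cs1 entry */
        };
        int n_addr = 2, n_size = 1;

        uint32_t cs0 = ntohl(reg[0]);
        uint32_t cs1 = ntohl(reg[n_addr + n_size]); /* skip one full entry */

        assert(cs0 == 0 && cs1 == 1);
        return 0;
    }
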
res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_cs0) + return -EINVAL; + + cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, + resource_size(res_cs0)); + if (!cs0) + return rv; + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) + return rv; + + ap = host->ports[0]; + ap->private_data = cf_port; + pdev->dev.platform_data = cf_port; + cf_port->ap = ap; + ap->ops = &octeon_cf_ops; + ap->pio_mask = ATA_PIO6; + ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING; + + if (!is_16bit) { + base = cs0 + 0x800; + ap->ioaddr.cmd_addr = base; + ata_sff_std_ports(&ap->ioaddr); + + ap->ioaddr.altstatus_addr = base + 0xe; + ap->ioaddr.ctl_addr = base + 0xe; + octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8; + } else if (cf_port->is_true_ide) { + base = cs0; + ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1; + ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1); + ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1; + ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1; + ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1; + ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1; + ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1; + ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1; + ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1; + ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1; + ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1; + ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1; + ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1; + octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; + + ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0; + + /* True IDE mode needs a timer to poll for not-busy. */ + hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + cf_port->delayed_finish.function = octeon_cf_delayed_finish; + } else { + /* 16 bit but not True IDE */ + base = cs0 + 0x800; + octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; + octeon_cf_ops.softreset = octeon_cf_softreset16; + octeon_cf_ops.sff_check_status = octeon_cf_check_status16; + octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16; + octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16; + octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16; + + ap->ioaddr.data_addr = base + ATA_REG_DATA; + ap->ioaddr.nsect_addr = base + ATA_REG_NSECT; + ap->ioaddr.lbal_addr = base + ATA_REG_LBAL; + ap->ioaddr.ctl_addr = base + 0xe; + ap->ioaddr.altstatus_addr = base + 0xe; + } + cf_port->c0 = ap->ioaddr.ctl_addr; + + rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rv) + return rv; + + ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr); + + dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n", + is_16bit ? 16 : 8, + cf_port->is_true_ide ? ", True IDE" : ""); + + return ata_host_activate(host, irq, irq_handler, + IRQF_SHARED, &octeon_cf_sht); +} + +static void octeon_cf_shutdown(struct device *dev) +{ + union cvmx_mio_boot_dma_cfgx dma_cfg; + union cvmx_mio_boot_dma_intx dma_int; + + struct octeon_cf_port *cf_port = dev_get_platdata(dev); + + if (cf_port->dma_base) { + /* Stop and clear the dma engine. */ + dma_cfg.u64 = 0; + dma_cfg.s.size = -1; + cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); + + /* Disable the interrupt. 
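
The True IDE branch above scatters the taskfile across odd byte addresses because the 8-bit registers hang off the upper lane of the 16-bit bus; only the data register is 16 bits wide and stays even. A standalone restatement of that address math, with register indices as in include/linux/ata.h:

    #include <assert.h>
    #include <stdint.h>

    /* Register indices as in include/linux/ata.h */
    enum { ATA_REG_DATA = 0, ATA_REG_ERR = 1, ATA_REG_NSECT = 2,
           ATA_REG_LBAL = 3, ATA_REG_LBAM = 4, ATA_REG_LBAH = 5,
           ATA_REG_DEVICE = 6, ATA_REG_STATUS = 7 };

    /* True IDE wiring: each 8-bit taskfile register lands at
     * (reg << 1) + 1; only the 16-bit data register is even. */
    static uintptr_t tf_reg_offset(int reg)
    {
        return reg == ATA_REG_DATA ? (ATA_REG_DATA << 1)
                                   : ((uintptr_t)reg << 1) + 1;
    }

    int main(void)
    {
        assert(tf_reg_offset(ATA_REG_DATA) == 0x0);
        assert(tf_reg_offset(ATA_REG_ERR) == 0x3);
        assert(tf_reg_offset(ATA_REG_STATUS) == 0xf);
        return 0;
    }
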
*/
+ dma_int.u64 = 0;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+ /* Clear the DMA complete status */
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+ __raw_writeb(0, cf_port->c0);
+ udelay(20);
+ __raw_writeb(ATA_SRST, cf_port->c0);
+ udelay(20);
+ __raw_writeb(0, cf_port->c0);
+ mdelay(100);
+ }
+}
+
+static const struct of_device_id octeon_cf_match[] = {
+ {
+ .compatible = "cavium,ebt3000-compact-flash",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
+static struct platform_driver octeon_cf_driver = {
+ .probe = octeon_cf_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_cf_match,
+ .shutdown = octeon_cf_shutdown
+ },
+};
+
+static int __init octeon_cf_init(void)
+{
+ return platform_driver_register(&octeon_cf_driver);
+}
+
+MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
+MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(octeon_cf_init);
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
new file mode 100644
index 00000000000..a7e95a54c78
--- /dev/null
+++ b/drivers/ata/pata_of_platform.c
@@ -0,0 +1,101 @@
+/*
+ * OF-platform PATA driver
+ *
+ * Copyright (c) 2007 MontaVista Software, Inc.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/libata.h>
+
+static int pata_of_platform_probe(struct platform_device *ofdev)
+{
+ int ret;
+ struct device_node *dn = ofdev->dev.of_node;
+ struct resource io_res;
+ struct resource ctl_res;
+ struct resource *irq_res;
+ unsigned int reg_shift = 0;
+ int pio_mode = 0;
+ int pio_mask;
+ const u32 *prop;
+
+ ret = of_address_to_resource(dn, 0, &io_res);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't get IO address from "
+ "device tree\n");
+ return -EINVAL;
+ }
+
+ if (of_device_is_compatible(dn, "electra-ide")) {
+ /* Altstatus is really at offset 0x3f6 from the primary window
+ * on electra-ide. Adjust ctl_res and io_res accordingly.
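
The fixup that follows carves the single electra-ide window into two adjacent resources; a standalone check of that arithmetic, with an invented base address:

    #include <assert.h>
    #include <stdint.h>

    struct res { uint64_t start, end; };  /* stand-in for struct resource */

    int main(void)
    {
        struct res io = { 0xf0000000, 0xf0000fff };
        struct res ctl = io;

        ctl.start = io.start + 0x3f6;   /* legacy altstatus offset */
        io.end = ctl.start - 1;

        assert(io.end == 0xf00003f5);   /* command block keeps 0x3f6 bytes */
        assert(ctl.start == 0xf00003f6);
        assert(io.end + 1 == ctl.start); /* no overlap, no gap */
        return 0;
    }
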
+ */ + ctl_res = io_res; + ctl_res.start = ctl_res.start+0x3f6; + io_res.end = ctl_res.start-1; + } else { + ret = of_address_to_resource(dn, 1, &ctl_res); + if (ret) { + dev_err(&ofdev->dev, "can't get CTL address from " + "device tree\n"); + return -EINVAL; + } + } + + irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); + if (irq_res) + irq_res->flags = 0; + + prop = of_get_property(dn, "reg-shift", NULL); + if (prop) + reg_shift = be32_to_cpup(prop); + + prop = of_get_property(dn, "pio-mode", NULL); + if (prop) { + pio_mode = be32_to_cpup(prop); + if (pio_mode > 6) { + dev_err(&ofdev->dev, "invalid pio-mode\n"); + return -EINVAL; + } + } else { + dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n"); + } + + pio_mask = 1 << pio_mode; + pio_mask |= (1 << pio_mode) - 1; + + return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, + reg_shift, pio_mask); +} + +static struct of_device_id pata_of_platform_match[] = { + { .compatible = "ata-generic", }, + { .compatible = "electra-ide", }, + {}, +}; +MODULE_DEVICE_TABLE(of, pata_of_platform_match); + +static struct platform_driver pata_of_platform_driver = { + .driver = { + .name = "pata_of_platform", + .owner = THIS_MODULE, + .of_match_table = pata_of_platform_match, + }, + .probe = pata_of_platform_probe, + .remove = ata_platform_remove_one, +}; + +module_platform_driver(pata_of_platform_driver); + +MODULE_DESCRIPTION("OF-platform PATA driver"); +MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 31a285ca88d..b9bf78b7d48 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -1,7 +1,7 @@ /* * pata_oldpiix.c - Intel PATA/SATA controllers * - * (C) 2005 Red Hat <alan@redhat.com> + * (C) 2005 Red Hat * * Some parts based on ata_piix.c by Jeff Garzik and others. * @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -25,17 +24,19 @@ #include <linux/ata.h> #define DRV_NAME "pata_oldpiix" -#define DRV_VERSION "0.5.2" +#define DRV_VERSION "0.5.5" /** * oldpiix_pre_reset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int oldpiix_pre_reset(struct ata_port *ap) +static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits oldpiix_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ @@ -44,28 +45,14 @@ static int oldpiix_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} -/** - * oldpiix_pata_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe - * @classes: - * - * LOCKING: - * None (inherited from caller). 
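
The pio_mask construction in pata_of_platform_probe() above sets the bit for the requested mode plus every lower one, matching libata's mask convention; a standalone check:

    #include <assert.h>

    /* Build a mask of PIO mode N and everything below it. */
    static int pio_mode_to_mask(int pio_mode)
    {
        int pio_mask = 1 << pio_mode;
        pio_mask |= (1 << pio_mode) - 1;
        return pio_mask;
    }

    int main(void)
    {
        assert(pio_mode_to_mask(0) == 0x01);  /* PIO0 only */
        assert(pio_mode_to_mask(4) == 0x1f);  /* PIO0-4, i.e. ATA_PIO4 */
        assert(pio_mode_to_mask(6) == 0x7f);  /* PIO0-6 */
        return 0;
    }
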
- */ - -static void oldpiix_pata_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** * oldpiix_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring - * @adev: um + * @adev: Device whose timings we are configuring * * Set PIO mode for device, in host controller PCI config space. * @@ -94,19 +81,21 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev) { 2, 1 }, { 2, 3 }, }; - if (pio > 2) - control |= 1; /* TIME1 enable */ + if (pio > 1) + control |= 1; /* TIME */ if (ata_pio_need_iordy(adev)) - control |= 2; /* IE IORDY */ + control |= 2; /* IE */ - /* Intel specifies that the PPE functionality is for disk only */ + /* Intel specifies that the prefetch/posting is for disk only */ if (adev->class == ATA_DEV_ATA) - control |= 4; /* PPE enable */ + control |= 4; /* PPE */ pci_read_config_word(dev, idetm_port, &idetm_data); - /* Enable PPE, IE and TIME as appropriate. Clear the other - drive timing bits */ + /* + * Set PPE, IE and TIME as appropriate. + * Clear the other drive's timing bits. + */ if (adev->devno == 0) { idetm_data &= 0xCCE0; idetm_data |= control; @@ -126,7 +115,6 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev) * oldpiix_set_dmamode - Initialize host controller PATA DMA timings * @ap: Port whose timings we are configuring * @adev: Device to program - * @isich: True if the device is an ICH and has IOCFG registers * * Set MWDMA mode for device, in host controller PCI config space. * @@ -191,80 +179,41 @@ static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev) } /** - * oldpiix_qc_issue_prot - command issue + * oldpiix_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if - * neccessary. Our logic also clears TIME0/TIME1 for the other device so + * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. 
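
The rewritten oldpiix_qc_issue() below reprograms timings only when the active device changes, since the chip shares one timing register set per channel; the pattern, restated standalone with the chipset pokes stubbed out (all names here are illustrative):

    #include <assert.h>

    struct dev { int id; };
    struct port { struct dev *last; int reloads; };

    static void set_timings(struct port *p, struct dev *d)
    {
        p->last = d;    /* remember who the registers are tuned for */
        p->reloads++;
    }

    static void issue(struct port *p, struct dev *d)
    {
        if (d != p->last)   /* only touch config space on a device switch */
            set_timings(p, d);
        /* ...then hand the command to the usual issue path... */
    }

    int main(void)
    {
        struct dev a = { 0 }, b = { 1 };
        struct port p = { 0 };

        issue(&p, &a); issue(&p, &a); issue(&p, &b); issue(&p, &a);
        assert(p.reloads == 3); /* a, b, a - consecutive repeats are free */
        return 0;
    }
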
*/ -static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; if (adev != ap->private_data) { - if (adev->dma_mode) + oldpiix_set_piomode(ap, adev); + if (ata_dma_enabled(adev)) oldpiix_set_dmamode(ap, adev); - else if (adev->pio_mode) - oldpiix_set_piomode(ap, adev); } - return ata_qc_issue_prot(qc); + return ata_bmdma_qc_issue(qc); } static struct scsi_host_template oldpiix_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations oldpiix_pata_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations oldpiix_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_issue = oldpiix_qc_issue, + .cable_detect = ata_cable_40wire, .set_piomode = oldpiix_set_piomode, .set_dmamode = oldpiix_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = oldpiix_pata_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = oldpiix_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .prereset = oldpiix_pre_reset, }; @@ -285,25 +234,22 @@ static const struct ata_port_operations oldpiix_pata_ops = { static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - static struct ata_port_info info = { - .sht = &oldpiix_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma1-2 */ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, .port_ops = &oldpiix_pata_ops, }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *ppi[] = { &info, NULL }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); } static const struct pci_device_id oldpiix_pci_tbl[] = { - { PCI_DEVICE(0x8086, 0x1230), }, + { PCI_VDEVICE(INTEL, 0x1230), }, + { } /* terminate list */ }; @@ -312,25 +258,16 @@ static struct pci_driver oldpiix_pci_driver = { .id_table = oldpiix_pci_tbl, .probe = oldpiix_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init 
oldpiix_init(void) -{ - return pci_register_driver(&oldpiix_pci_driver); -} - -static void __exit oldpiix_exit(void) -{ - pci_unregister_driver(&oldpiix_pci_driver); -} - - -module_init(oldpiix_init); -module_exit(oldpiix_exit); +module_pci_driver(oldpiix_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 57fe21f3a97..3a944a02926 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c @@ -1,7 +1,6 @@ /* * pata_opti.c - ATI PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> * * Based on * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002 @@ -27,14 +26,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_opti" -#define DRV_VERSION "0.2.5" +#define DRV_VERSION "0.2.9" enum { READ_REG = 0, /* index of Read cycle timing register */ @@ -46,13 +44,15 @@ enum { /** * opti_pre_reset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int opti_pre_reset(struct ata_port *ap) +static int opti_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits opti_enable_bits[] = { { 0x45, 1, 0x80, 0x00 }, @@ -62,23 +62,7 @@ static int opti_pre_reset(struct ata_port *ap) if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * opti_probe_reset - probe reset - * @ap: ATA port - * - * Perform the ATA probe and bus reset sequence plus specific handling - * for this hardware. The Opti needs little handling - we have no UDMA66 - * capability that needs cable detection. All we must do is check the port - * is enabled. - */ - -static void opti_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** @@ -95,43 +79,19 @@ static void opti_error_handler(struct ata_port *ap) static void opti_write_reg(struct ata_port *ap, u8 val, int reg) { - unsigned long regio = ap->ioaddr.cmd_addr; + void __iomem *regio = ap->ioaddr.cmd_addr; /* These 3 unlock the control register access */ - inw(regio + 1); - inw(regio + 1); - outb(3, regio + 2); + ioread16(regio + 1); + ioread16(regio + 1); + iowrite8(3, regio + 2); /* Do the I/O */ - outb(val, regio + reg); + iowrite8(val, regio + reg); /* Relock */ - outb(0x83, regio + 2); -} - -#if 0 -/** - * opti_read_reg - control register read - * @ap: ATA port - * @reg: control register number - * - * The Opti uses magic 'trapdoor' register accesses to do configuration - * rather than using PCI space as other controllers do. The double inw - * on the error register activates configuration mode. 
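
opti_write_reg() above wraps every configuration write in the unlock/relock ritual this comment describes; a standalone sketch of the sequencing with stubbed port accessors, offsets relative to the command block as in the driver:

    #include <assert.h>
    #include <stdint.h>

    static int unlocked;

    static uint16_t stub_ioread16(int off)
    {
        static int reads;
        if (off == 1 && ++reads >= 2)
            unlocked = 1;   /* two reads of the error reg arm the trapdoor */
        return 0;
    }

    static void stub_iowrite8(uint8_t val, int off)
    {
        if (off == 2)
            unlocked = (val == 3);  /* 3 opens config space, 0x83 closes it */
    }

    static void opti_write_reg_sketch(uint8_t val, int reg)
    {
        stub_ioread16(1);
        stub_ioread16(1);
        stub_iowrite8(3, 2);    /* unlocked: config write goes through */
        stub_iowrite8(val, reg);
        stub_iowrite8(0x83, 2); /* relock before normal ATA traffic */
    }

    int main(void)
    {
        opti_write_reg_sketch(0x55, 5);
        assert(!unlocked);  /* always relocked on exit */
        return 0;
    }
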
We can then read - * the control register - */ - -static u8 opti_read_reg(struct ata_port *ap, int reg) -{ - unsigned long regio = ap->ioaddr.cmd_addr; - u8 ret; - inw(regio + 1); - inw(regio + 1); - outb(3, regio + 2); - ret = inb(regio + reg); - outb(0x83, regio + 2); + iowrite8(0x83, regio + 2); } -#endif /** * opti_set_piomode - set initial PIO mode data @@ -148,7 +108,7 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev) struct ata_device *pair = ata_dev_pair(adev); int clock; int pio = adev->pio_mode - XFER_PIO_0; - unsigned long regio = ap->ioaddr.cmd_addr; + void __iomem *regio = ap->ioaddr.cmd_addr; u8 addr; /* Address table precomputed with prefetch off and a DCLK of 2 */ @@ -161,8 +121,8 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev) { 0x58, 0x44, 0x32, 0x22, 0x21 } }; - outb(0xff, regio + 5); - clock = inw(regio + 5) & 1; + iowrite8(0xff, regio + 5); + clock = ioread16(regio + 5) & 1; /* * As with many controllers the address setup time is shared @@ -188,103 +148,52 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev) } static struct scsi_host_template opti_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations opti_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, .set_piomode = opti_set_piomode, -/* .set_dmamode = opti_set_dmamode, */ - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = opti_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .prereset = opti_pre_reset, }; static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &opti_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, .port_ops = &opti_port_ops }; - static struct ata_port_info *port_info[2] = { &info, &info }; - static int printed_version; + const struct ata_port_info *ppi[] = { &info, NULL }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&dev->dev, DRV_VERSION); - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0); } static const struct pci_device_id opti[] = { - { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { 
PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, - { 0, }, + { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 }, + { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 }, + + { }, }; static struct pci_driver opti_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = opti, .probe = opti_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init opti_init(void) -{ - return pci_register_driver(&opti_pci_driver); -} - - -static void __exit opti_exit(void) -{ - pci_unregister_driver(&opti_pci_driver); -} - +module_pci_driver(opti_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Opti 621/621X"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, opti); MODULE_VERSION(DRV_VERSION); - -module_init(opti_init); -module_exit(opti_exit); diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 7296a20cd10..bdec7efa464 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -1,7 +1,6 @@ /* * pata_optidma.c - Opti DMA PATA for new ATA layer * (C) 2006 Red Hat Inc - * Alan Cox <alan@redhat.com> * * The Opti DMA controllers are related to the older PIO PCI controllers * and indeed the VLB ones. The main differences are that the timing @@ -26,14 +25,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_optidma" -#define DRV_VERSION "0.2.2" +#define DRV_VERSION "0.3.2" enum { READ_REG = 0, /* index of Read cycle timing register */ @@ -47,13 +45,15 @@ static int pci_clock; /* 0 = 33 1 = 25 */ /** * optidma_pre_reset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int optidma_pre_reset(struct ata_port *ap) +static int optidma_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits optidma_enable_bits = { 0x40, 1, 0x08, 0x00 @@ -62,23 +62,7 @@ static int optidma_pre_reset(struct ata_port *ap) if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * optidma_probe_reset - probe reset - * @ap: ATA port - * - * Perform the ATA probe and bus reset sequence plus specific handling - * for this hardware. The Opti needs little handling - we have no UDMA66 - * capability that needs cable detection. All we must do is check the port - * is enabled. 
- */ - -static void optidma_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } /** @@ -91,12 +75,12 @@ static void optidma_error_handler(struct ata_port *ap) static void optidma_unlock(struct ata_port *ap) { - unsigned long regio = ap->ioaddr.cmd_addr; + void __iomem *regio = ap->ioaddr.cmd_addr; /* These 3 unlock the control register access */ - inw(regio + 1); - inw(regio + 1); - outb(3, regio + 2); + ioread16(regio + 1); + ioread16(regio + 1); + iowrite8(3, regio + 2); } /** @@ -108,14 +92,14 @@ static void optidma_unlock(struct ata_port *ap) static void optidma_lock(struct ata_port *ap) { - unsigned long regio = ap->ioaddr.cmd_addr; + void __iomem *regio = ap->ioaddr.cmd_addr; /* Relock */ - outb(0x83, regio + 2); + iowrite8(0x83, regio + 2); } /** - * optidma_set_mode - set mode data + * optidma_mode_setup - set mode data * @ap: ATA interface * @adev: ATA device * @mode: Mode to set @@ -128,12 +112,12 @@ static void optidma_lock(struct ata_port *ap) * IRQ here we depend on the host set locking to avoid catastrophe. */ -static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) +static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct ata_device *pair = ata_dev_pair(adev); int pio = adev->pio_mode - XFER_PIO_0; int dma = adev->dma_mode - XFER_MW_DMA_0; - unsigned long regio = ap->ioaddr.cmd_addr; + void __iomem *regio = ap->ioaddr.cmd_addr; u8 addr; /* Address table precomputed with a DCLK of 2 */ @@ -178,20 +162,20 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo /* Commence primary programming sequence */ /* First we load the device number into the timing select */ - outb(adev->devno, regio + MISC_REG); + iowrite8(adev->devno, regio + MISC_REG); /* Now we load the data timings into read data/write data */ if (mode < XFER_MW_DMA_0) { - outb(data_rec_timing[pci_clock][pio], regio + READ_REG); - outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG); + iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG); + iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG); } else if (mode < XFER_UDMA_0) { - outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG); - outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG); + iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG); + iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG); } /* Finally we load the address setup into the misc register */ - outb(addr | adev->devno, regio + MISC_REG); + iowrite8(addr | adev->devno, regio + MISC_REG); /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */ - outb(0x85, regio + CNTRL_REG); + iowrite8(0x85, regio + CNTRL_REG); /* Switch back to IDE mode */ optidma_lock(ap); @@ -202,7 +186,7 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo } /** - * optiplus_set_mode - DMA setup for Firestar Plus + * optiplus_mode_setup - DMA setup for Firestar Plus * @ap: ATA port * @adev: device * @mode: desired mode @@ -213,7 +197,7 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo * one */ -static void optiplus_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode) +static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 udcfg; @@ -225,7 +209,7 @@ static void optiplus_set_mode(struct ata_port 
*ap, struct ata_device *adev, u8 m pci_read_config_byte(pdev, 0x44, &udcfg); if (mode <= XFER_UDMA_0) { udcfg &= ~(1 << unit); - optidma_set_mode(ap, adev, adev->dma_mode); + optidma_mode_setup(ap, adev, adev->dma_mode); } else { udcfg |= (1 << unit); if (ap->port_no) { @@ -253,7 +237,7 @@ static void optiplus_set_mode(struct ata_port *ap, struct ata_device *adev, u8 m static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev) { - optidma_set_mode(ap, adev, adev->pio_mode); + optidma_mode_setup(ap, adev, adev->pio_mode); } /** @@ -268,7 +252,7 @@ static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev) static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev) { - optidma_set_mode(ap, adev, adev->dma_mode); + optidma_mode_setup(ap, adev, adev->dma_mode); } /** @@ -283,7 +267,7 @@ static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev) static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev) { - optiplus_set_mode(ap, adev, adev->pio_mode); + optiplus_mode_setup(ap, adev, adev->pio_mode); } /** @@ -298,7 +282,7 @@ static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev) static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev) { - optiplus_set_mode(ap, adev, adev->dma_mode); + optiplus_mode_setup(ap, adev, adev->dma_mode); } /** @@ -322,114 +306,49 @@ static u8 optidma_make_bits43(struct ata_device *adev) } /** - * optidma_post_set_mode - finalize PCI setup - * @ap: port to set up + * optidma_set_mode - mode setup + * @link: link to set up * - * Finalise the configuration by writing the nibble of extra bits - * of data into the chip. + * Use the standard setup to tune the chipset and then finalise the + * configuration by writing the nibble of extra bits of data into + * the chip. 
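
optidma_set_mode() below packs both drives' UDMA bits into one nibble of PCI register 0x43; just the bit packing, restated standalone with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    /* Within each channel's nibble of register 0x43, the master's two
     * bits occupy [1:0] and the slave's occupy [3:2], which is what the
     * value + (value << 2) expression packs together. */
    static uint8_t pack_bits43(int port, uint8_t master, uint8_t slave)
    {
        int nybble = 4 * port;  /* port 0 -> low nibble, port 1 -> high */

        return (uint8_t)((master + (slave << 2)) << nybble);
    }

    int main(void)
    {
        assert(pack_bits43(0, 0x3, 0x1) == 0x07);   /* 0b0111, low nibble */
        assert(pack_bits43(1, 0x3, 0x1) == 0x70);
        return 0;
    }
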
*/
-static void optidma_post_set_mode(struct ata_port *ap)
+static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
+ struct ata_port *ap = link->ap;
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-
- pci_read_config_byte(pdev, 0x43, &r);
-
- r &= (0x0F << nybble);
- r |= (optidma_make_bits43(&ap->device[0]) +
- (optidma_make_bits43(&ap->device[1]) << 2)) << nybble;
-
- pci_write_config_byte(pdev, 0x43, r);
+ int rc = ata_do_set_mode(link, r_failed);
+ if (rc == 0) {
+ pci_read_config_byte(pdev, 0x43, &r);
+
+ r &= (0x0F << nybble);
+ r |= (optidma_make_bits43(&link->device[0]) +
+ (optidma_make_bits43(&link->device[1]) << 2)) << nybble;
+ pci_write_config_byte(pdev, 0x43, r);
+ }
+ return rc;
}

static struct scsi_host_template optidma_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .ioctl = ata_scsi_ioctl,
- .queuecommand = ata_scsi_queuecmd,
- .can_queue = ATA_DEF_QUEUE,
- .this_id = ATA_SHT_THIS_ID,
- .sg_tablesize = LIBATA_MAX_PRD,
- .max_sectors = ATA_MAX_SECTORS,
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
- .emulated = ATA_SHT_EMULATED,
- .use_clustering = ATA_SHT_USE_CLUSTERING,
- .proc_name = DRV_NAME,
- .dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = ata_scsi_slave_config,
- .bios_param = ata_std_bios_param,
+ ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations optidma_port_ops = {
- .port_disable = ata_port_disable,
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
-
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .freeze = ata_bmdma_freeze,
- .thaw = ata_bmdma_thaw,
- .post_internal_cmd = ata_bmdma_post_internal_cmd,
- .error_handler = optidma_error_handler,
- .post_set_mode = optidma_post_set_mode,
-
- .bmdma_setup = ata_bmdma_setup,
- .bmdma_start = ata_bmdma_start,
- .bmdma_stop = ata_bmdma_stop,
- .bmdma_status = ata_bmdma_status,
-
- .qc_prep = ata_qc_prep,
- .qc_issue = ata_qc_issue_prot,
-
- .data_xfer = ata_pio_data_xfer,
-
- .irq_handler = ata_interrupt,
- .irq_clear = ata_bmdma_irq_clear,
-
- .port_start = ata_port_start,
- .port_stop = ata_port_stop,
- .host_stop = ata_host_stop
+ .set_mode = optidma_set_mode,
+ .prereset = optidma_pre_reset,
};

static struct ata_port_operations optiplus_port_ops = {
- .port_disable = ata_port_disable,
+ .inherits = &optidma_port_ops,
.set_piomode = optiplus_set_pio_mode,
.set_dmamode = optiplus_set_dma_mode,
-
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .freeze = ata_bmdma_freeze,
- .thaw = ata_bmdma_thaw,
- .post_internal_cmd = ata_bmdma_post_internal_cmd,
- .error_handler = optidma_error_handler,
- .post_set_mode = optidma_post_set_mode,
-
- .bmdma_setup = ata_bmdma_setup,
- .bmdma_start = ata_bmdma_start,
- .bmdma_stop = ata_bmdma_stop,
- .bmdma_status = ata_bmdma_status,
-
- .qc_prep = ata_qc_prep,
- .qc_issue = ata_qc_issue_prot,
-
- .data_xfer = ata_pio_data_xfer,
-
- .irq_handler = ata_interrupt,
- .irq_clear = ata_bmdma_irq_clear,
-
- .port_start = ata_port_start,
- .port_stop = ata_port_stop,
- .host_stop = ata_host_stop
};

/**
@@ -446,7 +365,7 @@ static int optiplus_with_udma(struct pci_dev *pdev)

/* Find function 1 */
dev1 = pci_get_device(0x1045, 0xC701, NULL);
- if(dev1 == NULL)
+ if (dev1 == NULL)
return 0;

/*
Rev must be >= 0x10 */ @@ -477,27 +396,27 @@ done_nomsg: /* Wrong chip revision */ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info_82c700 = { - .sht = &optidma_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info_82c700 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &optidma_port_ops }; - static struct ata_port_info info_82c700_udma = { - .sht = &optidma_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, + static const struct ata_port_info info_82c700_udma = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &optiplus_port_ops }; - static struct ata_port_info *port_info[2]; - struct ata_port_info *info = &info_82c700; - static int printed_version; + const struct ata_port_info *ppi[] = { &info_82c700, NULL }; + int rc; + + ata_print_version_once(&dev->dev, DRV_VERSION); - if (!printed_version++) - dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); + rc = pcim_enable_device(dev); + if (rc) + return rc; /* Fixed location chipset magic */ inw(0x1F1); @@ -505,41 +424,32 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_clock = inb(0x1F5) & 1; /* 0 = 33Mhz, 1 = 25Mhz */ if (optiplus_with_udma(dev)) - info = &info_82c700_udma; + ppi[0] = &info_82c700_udma; - port_info[0] = port_info[1] = info; - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0); } static const struct pci_device_id optidma[] = { - { PCI_DEVICE(0x1045, 0xD568), }, /* Opti 82C700 */ - { 0, }, + { PCI_VDEVICE(OPTI, 0xD568), }, /* Opti 82C700 */ + + { }, }; static struct pci_driver optidma_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = optidma, .probe = optidma_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init optidma_init(void) -{ - return pci_register_driver(&optidma_pci_driver); -} - - -static void __exit optidma_exit(void) -{ - pci_unregister_driver(&optidma_pci_driver); -} - +module_pci_driver(optidma_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, optidma); MODULE_VERSION(DRV_VERSION); - -module_init(optidma_init); -module_exit(optidma_exit); diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c new file mode 100644 index 00000000000..df2bb7504fc --- /dev/null +++ b/drivers/ata/pata_palmld.c @@ -0,0 +1,138 @@ +/* + * drivers/ata/pata_palmld.c + * + * Driver for IDE channel in Palm LifeDrive + * + * Based on research of: + * Alex Osborne <ato@meshy.org> + * + * Rewrite for mainline: + * Marek Vasut <marek.vasut@gmail.com> + * + * Rewritten version based on pata_ixp4xx_cf.c: + * ixp4xx PATA/Compact Flash driver + * Copyright (C) 2006-07 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
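
optidma_init_one() just shown senses the bus clock through the chip's fixed legacy ports rather than PCI config space; the idiom, restated with stubbed port I/O and a made-up latch value:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t fake_ports[0x200];

    static uint16_t inw_stub(uint16_t port) { return fake_ports[port]; }
    static uint8_t inb_stub(uint16_t port) { return fake_ports[port]; }

    int main(void)
    {
        int pci_clock;

        fake_ports[0x1F5] = 0x01;       /* pretend the chip reports 25 MHz */

        (void)inw_stub(0x1F1);          /* dummy read, as in the driver */
        pci_clock = inb_stub(0x1F5) & 1; /* 0 = 33 MHz, 1 = 25 MHz */

        assert(pci_clock == 1);
        return 0;
    }
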
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/libata.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/gpio.h> + +#include <scsi/scsi_host.h> +#include <mach/palmld.h> + +#define DRV_NAME "pata_palmld" + +static struct gpio palmld_hdd_gpios[] = { + { GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" }, + { GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" }, +}; + +static struct scsi_host_template palmld_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations palmld_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_40wire, +}; + +static int palmld_pata_probe(struct platform_device *pdev) +{ + struct ata_host *host; + struct ata_port *ap; + void __iomem *mem; + int ret; + + /* allocate host */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) { + ret = -ENOMEM; + goto err1; + } + + /* remap drive's physical memory address */ + mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); + if (!mem) { + ret = -ENOMEM; + goto err1; + } + + /* request and activate power GPIO, IRQ GPIO */ + ret = gpio_request_array(palmld_hdd_gpios, + ARRAY_SIZE(palmld_hdd_gpios)); + if (ret) + goto err1; + + /* reset the drive */ + gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); + msleep(30); + gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1); + msleep(30); + + /* setup the ata port */ + ap = host->ports[0]; + ap->ops = &palmld_port_ops; + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_PIO_POLLING; + + /* memory mapping voodoo */ + ap->ioaddr.cmd_addr = mem + 0x10; + ap->ioaddr.altstatus_addr = mem + 0xe; + ap->ioaddr.ctl_addr = mem + 0xe; + + /* start the port */ + ata_sff_std_ports(&ap->ioaddr); + + /* activate host */ + ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, + &palmld_sht); + if (ret) + goto err2; + + return ret; + +err2: + gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios)); +err1: + return ret; +} + +static int palmld_pata_remove(struct platform_device *dev) +{ + ata_platform_remove_one(dev); + + /* power down the HDD */ + gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); + + gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios)); + + return 0; +} + +static struct platform_driver palmld_pata_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = palmld_pata_probe, + .remove = palmld_pata_remove, +}; + +module_platform_driver(palmld_pata_platform_driver); + +MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); +MODULE_DESCRIPTION("PalmLD PATA driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index cb501e145a4..bcc4b968c04 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -1,6 +1,6 @@ /* * pata_pcmcia.c - PCMCIA PATA controller driver. - * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved. + * Copyright 2005-2006 Red Hat Inc, all rights reserved. 
* PCMCIA ident update Copyright 2006 Marcin Juszkiewicz * <openembedded@hrw.one.pl> * @@ -26,15 +26,13 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/slab.h> #include <scsi/scsi_host.h> #include <linux/ata.h> #include <linux/libata.h> -#include <pcmcia/cs_types.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> @@ -42,64 +40,153 @@ #define DRV_NAME "pata_pcmcia" -#define DRV_VERSION "0.2.9" +#define DRV_VERSION "0.3.5" -/* - * Private data structure to glue stuff together +/** + * pcmcia_set_mode - PCMCIA specific mode setup + * @link: link + * @r_failed_dev: Return pointer for failed device + * + * Perform the tuning and setup of the devices and timings, which + * for PCMCIA is the same as any other controller. We wrap it however + * as we need to spot hardware with incorrect or missing master/slave + * decode, which alas is embarrassingly common in the PC world */ -struct ata_pcmcia_info { - struct pcmcia_device *pdev; - int ndev; - dev_node_t node; -}; +static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) +{ + struct ata_device *master = &link->device[0]; + struct ata_device *slave = &link->device[1]; + + if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) + return ata_do_set_mode(link, r_failed_dev); + + if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, + ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) { + /* Suspicious match, but could be two cards from + the same vendor - check serial */ + if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO, + ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) { + ata_dev_warn(slave, "is a ghost device, ignoring\n"); + ata_dev_disable(slave); + } + } + return ata_do_set_mode(link, r_failed_dev); +} + +/** + * pcmcia_set_mode_8bit - PCMCIA specific mode setup + * @link: link + * @r_failed_dev: Return pointer for failed device + * + * For the simple emulated 8bit stuff the less we do the better. + */ + +static int pcmcia_set_mode_8bit(struct ata_link *link, + struct ata_device **r_failed_dev) +{ + return 0; +} + +/** + * ata_data_xfer_8bit - Transfer data by 8bit PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write + * + * Transfer data from/to the device data register by 8 bit PIO. + * + * LOCKING: + * Inherited from caller. + */ + +static unsigned int ata_data_xfer_8bit(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + + if (rw == READ) + ioread8_rep(ap->ioaddr.data_addr, buf, buflen); + else + iowrite8_rep(ap->ioaddr.data_addr, buf, buflen); + + return buflen; +} + +/** + * pcmcia_8bit_drain_fifo - Stock FIFO drain logic for SFF controllers + * @qc: command + * + * Drain the FIFO and device of any stuck data following a command + * failing to complete. In some cases this is necessary before a + * reset will recover the device. 
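
The ghost test in pcmcia_set_mode() earlier treats a slave that mirrors the master's firmware revision, model and serial number as the same card answering at both device addresses. The driver compares raw IDENTIFY words; this standalone sketch uses plain strings to show the same decision:

    #include <assert.h>
    #include <string.h>

    struct ident { char fw[9]; char model[17]; char serial[11]; };

    static int is_ghost(const struct ident *m, const struct ident *s)
    {
        return !strcmp(m->fw, s->fw) &&
               !strcmp(m->model, s->model) &&
               s->serial[0] != '\0' &&      /* a blank serial proves nothing */
               !strcmp(m->serial, s->serial);
    }

    int main(void)
    {
        struct ident a = { "1.0", "SANDISK CF", "SN0001" };
        struct ident b = a;                 /* same card seen twice */
        struct ident c = { "1.0", "SANDISK CF", "SN0002" };

        assert(is_ghost(&a, &b));
        assert(!is_ghost(&a, &c));          /* two real, distinct cards */
        return 0;
    }
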
+ * + */ + +static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc) +{ + int count; + struct ata_port *ap; + + /* We only need to flush incoming data when a command was running */ + if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) + return; + + ap = qc->ap; + + /* Drain up to 64K of data before we give up this recovery method */ + for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) + && count++ < 65536;) + ioread8(ap->ioaddr.data_addr); + + if (count) + ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count); + +} static struct scsi_host_template pcmcia_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations pcmcia_port_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_40wire, + .set_mode = pcmcia_set_mode, +}; - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +static struct ata_port_operations pcmcia_8bit_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_data_xfer_8bit, + .cable_detect = ata_cable_40wire, + .set_mode = pcmcia_set_mode_8bit, + .sff_drain_fifo = pcmcia_8bit_drain_fifo, +}; - .data_xfer = ata_pio_data_xfer_noirq, - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) +{ + int *is_kme = priv_data; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; + if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) + != IO_DATA_PATH_WIDTH_8) { + pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + } + pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + + if (pdev->resource[1]->end) { + pdev->resource[0]->end = 8; + pdev->resource[1]->end = (*is_kme) ? 
2 : 1; + } else { + if (pdev->resource[0]->end < 16) + return -ENODEV; + } -#define CS_CHECK(fn, ret) \ -do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) + return pcmcia_request_io(pdev); +} /** * pcmcia_init_one - attach a PCMCIA interface @@ -111,180 +198,95 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) static int pcmcia_init_one(struct pcmcia_device *pdev) { - struct ata_probe_ent ae; - struct ata_pcmcia_info *info; - tuple_t tuple; - struct { - unsigned short buf[128]; - cisparse_t parse; - config_info_t conf; - cistpl_cftable_entry_t dflt; - } *stk = NULL; - cistpl_cftable_entry_t *cfg; - int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM; + struct ata_host *host; + struct ata_port *ap; + int is_kme = 0, ret = -ENOMEM, p; unsigned long io_base, ctl_base; - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (info == NULL) - return -ENOMEM; - - /* Glue stuff together. FIXME: We may be able to get rid of info with care */ - info->pdev = pdev; - pdev->priv = info; + void __iomem *io_addr, *ctl_addr; + int n_ports = 1; + struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ - pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; - pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; - pdev->io.IOAddrLines = 3; - pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; - pdev->irq.IRQInfo1 = IRQ_LEVEL_ID; - pdev->conf.Attributes = CONF_ENABLE_IRQ; - pdev->conf.IntType = INT_MEMORY_AND_IO; - - /* Allocate resoure probing structures */ - - stk = kzalloc(sizeof(*stk), GFP_KERNEL); - if (!stk) - goto out1; - - cfg = &stk->parse.cftable_entry; - - /* Tuples we are walking */ - tuple.TupleData = (cisdata_t *)&stk->buf; - tuple.TupleOffset = 0; - tuple.TupleDataMax = 255; - tuple.Attributes = 0; - tuple.DesiredTuple = CISTPL_CONFIG; - - CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple)); - CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple)); - CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse)); - pdev->conf.ConfigBase = stk->parse.config.base; - pdev->conf.Present = stk->parse.config.rmask[0]; + pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | + CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; /* See if we have a manufacturer identifier. Use it to set is_kme for vendor quirks */ - tuple.DesiredTuple = CISTPL_MANFID; - if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse)) - is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B))); - - /* Not sure if this is right... 
look up the current Vcc */ - CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf)); -/* link->conf.Vcc = stk->conf.Vcc; */ - - pass = io_base = ctl_base = 0; - tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; - tuple.Attributes = 0; - CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple)); - - /* Now munch the resources looking for a suitable set */ - while (1) { - if (pcmcia_get_tuple_data(pdev, &tuple) != 0) - goto next_entry; - if (pcmcia_parse_tuple(pdev, &tuple, &stk->parse) != 0) - goto next_entry; - /* Check for matching Vcc, unless we're desperate */ - if (!pass) { - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (stk->conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) - goto next_entry; - } else if (stk->dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (stk->conf.Vcc != stk->dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) - goto next_entry; - } - } - - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; - - if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &stk->dflt.io; - pdev->conf.ConfigIndex = cfg->index; - pdev->io.BasePort1 = io->win[0].base; - pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; - if (!(io->flags & CISTPL_IO_16BIT)) - pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; - if (io->nwin == 2) { - pdev->io.NumPorts1 = 8; - pdev->io.BasePort2 = io->win[1].base; - pdev->io.NumPorts2 = (is_kme) ? 2 : 1; - if (pcmcia_request_io(pdev, &pdev->io) != 0) - goto next_entry; - io_base = pdev->io.BasePort1; - ctl_base = pdev->io.BasePort2; - } else if ((io->nwin == 1) && (io->win[0].len >= 16)) { - pdev->io.NumPorts1 = io->win[0].len; - pdev->io.NumPorts2 = 0; - if (pcmcia_request_io(pdev, &pdev->io) != 0) - goto next_entry; - io_base = pdev->io.BasePort1; - ctl_base = pdev->io.BasePort1 + 0x0e; - } else goto next_entry; - /* If we've got this far, we're done */ - break; - } -next_entry: - if (cfg->flags & CISTPL_CFTABLE_DEFAULT) - memcpy(&stk->dflt, cfg, sizeof(stk->dflt)); - if (pass) { - CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(pdev, &tuple)); - } else if (pcmcia_get_next_tuple(pdev, &tuple) != 0) { - CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple)); - memset(&stk->dflt, 0, sizeof(stk->dflt)); - pass++; - } + is_kme = ((pdev->manf_id == MANFID_KME) && + ((pdev->card_id == PRODID_KME_KXLC005_A) || + (pdev->card_id == PRODID_KME_KXLC005_B))); + + if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) { + pdev->config_flags &= ~CONF_AUTO_CHECK_VCC; + if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) + goto failed; /* No suitable config found */ } + io_base = pdev->resource[0]->start; + if (pdev->resource[1]->end) + ctl_base = pdev->resource[1]->start; + else + ctl_base = pdev->resource[0]->start + 0x0e; + + if (!pdev->irq) + goto failed; - CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq)); - CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf)); + ret = pcmcia_enable_device(pdev); + if (ret) + goto failed; + + /* iomap */ + ret = -ENOMEM; + io_addr = devm_ioport_map(&pdev->dev, io_base, 8); + ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1); + if (!io_addr || !ctl_addr) + goto failed; /* Success. 
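
The window selection above follows the CF convention: dual-window cards expose the control block in the second window, while single-window cards overlay it at the legacy +0x0e offset inside the first; restated standalone with an invented resource type:

    #include <assert.h>
    #include <stdint.h>

    struct win { uint32_t start, end; };    /* stand-in for struct resource */

    static uint32_t ctl_base(const struct win *w0, const struct win *w1)
    {
        return w1->end ? w1->start : w0->start + 0x0e;
    }

    int main(void)
    {
        struct win io = { 0x100, 0x10f }, ctl2 = { 0x110, 0x111 };
        struct win none = { 0, 0 };

        assert(ctl_base(&io, &ctl2) == 0x110);  /* two windows */
        assert(ctl_base(&io, &none) == 0x10e);  /* one window */
        return 0;
    }
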
Disable the IRQ nIEN line, do quirks */ - outb(0x02, ctl_base); + iowrite8(0x02, ctl_addr); if (is_kme) - outb(0x81, ctl_base + 0x01); + iowrite8(0x81, ctl_addr + 0x01); /* FIXME: Could be more ports at base + 0x10 but we only deal with one right now */ - if (pdev->io.NumPorts1 >= 0x20) - printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n"); + if (resource_size(pdev->resource[0]) >= 0x20) + n_ports = 2; + if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620) + ops = &pcmcia_8bit_port_ops; /* - * Having done the PCMCIA plumbing the ATA side is relatively - * sane. + * Having done the PCMCIA plumbing the ATA side is relatively + * sane. */ + ret = -ENOMEM; + host = ata_host_alloc(&pdev->dev, n_ports); + if (!host) + goto failed; + + for (p = 0; p < n_ports; p++) { + ap = host->ports[p]; + + ap->ops = ops; + ap->pio_mask = ATA_PIO0; /* ISA so PIO 0 cycles */ + ap->flags |= ATA_FLAG_SLAVE_POSS; + ap->ioaddr.cmd_addr = io_addr + 0x10 * p; + ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p; + ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p; + ata_sff_std_ports(&ap->ioaddr); - memset(&ae, 0, sizeof(struct ata_probe_ent)); - INIT_LIST_HEAD(&ae.node); - ae.dev = &pdev->dev; - ae.port_ops = &pcmcia_port_ops; - ae.sht = &pcmcia_sht; - ae.n_ports = 1; - ae.pio_mask = 1; /* ISA so PIO 0 cycles */ - ae.irq = pdev->irq.AssignedIRQ; - ae.irq_flags = SA_SHIRQ; - ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; - ae.port[0].cmd_addr = io_base; - ae.port[0].altstatus_addr = ctl_base; - ae.port[0].ctl_addr = ctl_base; - ata_std_ports(&ae.port[0]); - - if (ata_device_add(&ae) == 0) + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base); + } + + /* activate */ + ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt, + IRQF_SHARED, &pcmcia_sht); + if (ret) goto failed; - info->ndev = 1; - kfree(stk); + pdev->priv = host; return 0; -cs_failed: - cs_error(pdev, last_fn, last_ret); failed: - kfree(stk); - info->ndev = 0; pcmcia_disable_device(pdev); -out1: - kfree(info); return ret; } @@ -298,39 +300,38 @@ out1: static void pcmcia_remove_one(struct pcmcia_device *pdev) { - struct ata_pcmcia_info *info = pdev->priv; - struct device *dev = &pdev->dev; - - if (info != NULL) { - /* If we have attached the device to the ATA layer, detach it */ - if (info->ndev) { - struct ata_host *host = dev_get_drvdata(dev); - ata_host_remove(host); - dev_set_drvdata(dev, NULL); - } - info->ndev = 0; - pdev->priv = NULL; - } + struct ata_host *host = pdev->priv; + + if (host) + ata_host_detach(host); + pcmcia_disable_device(pdev); - kfree(info); } -static struct pcmcia_device_id pcmcia_devices[] = { +static const struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_FUNC_ID(4), + PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ + PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ + PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), - PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), + PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904), + PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ + PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */ + PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ - PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ + PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ 
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), - PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */ + PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ + PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74), PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), + PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), @@ -340,12 +341,17 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), + PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), + PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), @@ -355,10 +361,19 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), + PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), + PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), + PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), PCMCIA_DEVICE_NULL, }; @@ -366,28 +381,14 @@ MODULE_DEVICE_TABLE(pcmcia, 
pcmcia_devices); static struct pcmcia_driver pcmcia_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRV_NAME, - }, + .name = DRV_NAME, .id_table = pcmcia_devices, .probe = pcmcia_init_one, .remove = pcmcia_remove_one, }; - -static int __init pcmcia_init(void) -{ - return pcmcia_register_driver(&pcmcia_driver); -} - -static void __exit pcmcia_exit(void) -{ - pcmcia_unregister_driver(&pcmcia_driver); -} +module_pcmcia_driver(pcmcia_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for PCMCIA ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); - -module_init(pcmcia_init); -module_exit(pcmcia_exit); diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index bd4ed6734ed..4d06a5cda98 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -25,7 +25,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -33,19 +32,20 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> -#include <asm/io.h> #define DRV_NAME "pata_pdc2027x" -#define DRV_VERSION "0.74-ac5" +#define DRV_VERSION "1.0" #undef PDC_DEBUG #ifdef PDC_DEBUG -#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define PDPRINTK(fmt, args...) #endif enum { + PDC_MMIO_BAR = 5, + PDC_UDMA_100 = 0, PDC_UDMA_133 = 1, @@ -62,12 +62,16 @@ enum { }; static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -static void pdc2027x_remove_one(struct pci_dev *pdev); -static void pdc2027x_error_handler(struct ata_port *ap); +#ifdef CONFIG_PM_SLEEP +static int pdc2027x_reinit_one(struct pci_dev *pdev); +#endif +static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline); static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); -static void pdc2027x_post_set_mode(struct ata_port *ap); static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); +static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask); +static int pdc2027x_cable_detect(struct ata_port *ap); +static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed); /* * ATA Timing Tables based on 133MHz controller clock. 
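 * Each table entry holds the raw register bytes for one transfer mode.
 * For PIO, for instance, value0/value1 are loaded into the low half of
 * CTCR0 and value2 into the top byte of CTCR1 (see pdc2027x_set_piomode()
 * below); the MWDMA and UDMA tables feed the remaining CTCR0/CTCR1
 * fields in the same way.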
@@ -108,13 +112,14 @@ static struct pdc2027x_udma_timing { }; static const struct pci_device_id pdc2027x_pci_tbl[] = { - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20268, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20269, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20271, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20275, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20276, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, - { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20277, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 }, + { } /* terminate list */ }; @@ -122,111 +127,47 @@ static struct pci_driver pdc2027x_pci_driver = { .name = DRV_NAME, .id_table = pdc2027x_pci_tbl, .probe = pdc2027x_init_one, - .remove = __devexit_p(pdc2027x_remove_one), + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = pdc2027x_reinit_one, +#endif }; static struct scsi_host_template pdc2027x_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations pdc2027x_pata100_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - + .inherits = &ata_bmdma_port_ops, .check_atapi_dma = pdc2027x_check_atapi_dma, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = pdc2027x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, + .cable_detect = pdc2027x_cable_detect, + .prereset = pdc2027x_prereset, }; static struct ata_port_operations pdc2027x_pata133_ops = { - .port_disable = ata_port_disable, + .inherits = &pdc2027x_pata100_ops, + .mode_filter = pdc2027x_mode_filter, .set_piomode = pdc2027x_set_piomode, .set_dmamode = pdc2027x_set_dmamode, - .post_set_mode = pdc2027x_post_set_mode, - - .tf_load = ata_tf_load, - 
.tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .check_atapi_dma = pdc2027x_check_atapi_dma, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = pdc2027x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, + .set_mode = pdc2027x_set_mode, }; static struct ata_port_info pdc2027x_port_info[] = { /* PDC_UDMA_100 */ { - .sht = &pdc2027x_sht, - .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | - ATA_FLAG_MMIO, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = ATA_UDMA5, /* udma0-5 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &pdc2027x_pata100_ops, }, /* PDC_UDMA_133 */ { - .sht = &pdc2027x_sht, - .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | - ATA_FLAG_MMIO, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = ATA_UDMA6, /* udma0-6 */ + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &pdc2027x_pata133_ops, }, }; @@ -244,7 +185,7 @@ MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl); */ static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset) { - return ap->host->mmio_base + ap->port_no * 0x100 + offset; + return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset; } /** @@ -260,7 +201,7 @@ static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *ade } /** - * pdc2027x_pata_cbl_detect - Probe host controller cable detect info + * pdc2027x_pata_cable_detect - Probe host controller cable detect info * @ap: Port for which cable detect info is desired * * Read 80c cable indicator from Promise extended register. @@ -269,24 +210,21 @@ static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *ade * LOCKING: * None (inherited from caller). */ -static void pdc2027x_cbl_detect(struct ata_port *ap) +static int pdc2027x_cable_detect(struct ata_port *ap) { u32 cgcr; /* check cable detect results */ - cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL)); + cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL)); if (cgcr & (1 << 26)) goto cbl40; PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no); - ap->cbl = ATA_CBL_PATA80; - return; - + return ATA_CBL_PATA80; cbl40: printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no); - ap->cbl = ATA_CBL_PATA40; - ap->udma_mask &= ATA_UDMA_MASK_40C; + return ATA_CBL_PATA40; } /** @@ -295,12 +233,13 @@ cbl40: */ static inline int pdc2027x_port_enabled(struct ata_port *ap) { - return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02; + return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02; } /** * pdc2027x_prereset - prereset for PATA host controller - * @ap: Target port + * @link: Target link + * @deadline: deadline jiffies for the operation * * Probeinit including cable detection. * @@ -308,35 +247,44 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap) * None (inherited from caller). 
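 *
 * RETURNS:
 * -ENOENT if the port is disabled in hardware (so error handling skips
 * it), otherwise the result of ata_sff_prereset().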
 */
-static int pdc2027x_prereset(struct ata_port *ap)
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
 {
 	/* Check whether port enabled */
-	if (!pdc2027x_port_enabled(ap))
+	if (!pdc2027x_port_enabled(link->ap))
 		return -ENOENT;
-	pdc2027x_cbl_detect(ap);
-	return ata_std_prereset(ap);
+	return ata_sff_prereset(link, deadline);
 }

 /**
- * pdc2027x_error_handler - Perform reset on PATA port and classify
- * @ap: Port to reset
+ * pdc2027x_mode_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: list of modes proposed
  *
- * Reset PATA phy and classify attached devices.
- *
- * LOCKING:
- * None (inherited from caller).
+ * Block UDMA on devices that cause trouble with this controller.
  */
-static void pdc2027x_error_handler(struct ata_port *ap)
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask)
 {
-	ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
+		return mask;
+
+	/* Check for slave of a Maxtor at UDMA6 */
+	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
+			ATA_ID_PROD_LEN + 1);
+	/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
+	if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
+		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
+
+	return mask;
 }

 /**
  * pdc2027x_set_piomode - Initialize host controller PATA PIO timings
  * @ap: Port to configure
  * @adev: device to configure
- * @pio: PIO mode, 0 - 4
  *
  * Set PIO mode for device.
  *
@@ -361,16 +309,16 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)

 	/* Set the PIO timing registers using value table for 133MHz */
 	PDPRINTK("Set pio regs... \n");

-	ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
+	ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
 	ctcr0 &= 0xffff0000;
 	ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
 		(pdc2027x_pio_timing_tbl[pio].value1 << 8);
-	writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+	iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));

-	ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+	ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
 	ctcr1 &= 0x00ffffff;
 	ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
-	writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+	iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));

 	PDPRINTK("Set pio regs done\n");

@@ -381,7 +329,6 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
  * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
  * @ap: Port to configure
  * @adev: device to configure
- * @udma: udma mode, XFER_UDMA_0 to XFER_UDMA_6
  *
  * Set UDMA mode for device.
  *
@@ -404,18 +351,18 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 		 * If tHOLD is '1', the hardware will add half clock for data hold time.
 		 * This code segment seems to have no effect. tHOLD will be overwritten below.
 		 */
-		ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
-		writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
+		ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+		iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
 	}

 	PDPRINTK("Set udma regs...
\n"); - ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); + ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1)); ctcr1 &= 0xff000000; ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 | (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) | (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16); - writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); + iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); PDPRINTK("Set udma regs done\n"); @@ -427,13 +374,13 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) unsigned int mdma_mode = dma_mode & 0x07; PDPRINTK("Set mdma regs... \n"); - ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0)); + ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0)); ctcr0 &= 0x0000ffff; ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) | (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24); - writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); + iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); PDPRINTK("Set mdma regs done\n"); PDPRINTK("Set to mdma mode[%u] \n", mdma_mode); @@ -443,38 +390,41 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev) } /** - * pdc2027x_post_set_mode - Set the timing registers back to correct values. - * @ap: Port to configure + * pdc2027x_set_mode - Set the timing registers back to correct values. + * @link: link to configure + * @r_failed: Returned device for failure * * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers * automatically. The values set by the hardware might be incorrect, under 133Mhz PLL. * This function overwrites the possibly incorrect values set by the hardware to be correct. */ -static void pdc2027x_post_set_mode(struct ata_port *ap) +static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed) { - int i; + struct ata_port *ap = link->ap; + struct ata_device *dev; + int rc; - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; + rc = ata_do_set_mode(link, r_failed); + if (rc < 0) + return rc; - if (ata_dev_enabled(dev)) { + ata_for_each_dev(dev, link, ENABLED) { + pdc2027x_set_piomode(ap, dev); - pdc2027x_set_piomode(ap, dev); + /* + * Enable prefetch if the device support PIO only. + */ + if (dev->xfer_shift == ATA_SHIFT_PIO) { + u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1)); + ctcr1 |= (1 << 25); + iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); - /* - * Enable prefetch if the device support PIO only. 
- */ - if (dev->xfer_shift == ATA_SHIFT_PIO) { - u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1)); - ctcr1 |= (1 << 25); - writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); - - PDPRINTK("Turn on prefetch\n"); - } else { - pdc2027x_set_dmamode(ap, dev); - } + PDPRINTK("Turn on prefetch\n"); + } else { + pdc2027x_set_dmamode(ap, dev); } } + return 0; } /** @@ -520,24 +470,23 @@ static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc) /** * pdc_read_counter - Read the ctr counter - * @probe_ent: for the port address + * @host: target ATA host */ -static long pdc_read_counter(struct ata_probe_ent *probe_ent) +static long pdc_read_counter(struct ata_host *host) { + void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; long counter; int retry = 1; u32 bccrl, bccrh, bccrlv, bccrhv; retry: - bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff; - bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff; - rmb(); + bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff; + bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; /* Read the counter values again for verification */ - bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff; - bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff; - rmb(); + bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff; + bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; counter = (bccrh << 15) | bccrl; @@ -562,12 +511,12 @@ retry: * adjust_pll - Adjust the PLL input clock in Hz. * * @pdc_controller: controller specific information - * @probe_ent: For the port address + * @host: target ATA host * @pll_clock: The input of PLL in HZ */ -static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx) +static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int board_idx) { - + void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; u16 pll_ctl; long pll_clock_khz = pll_clock / 1000; long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ; @@ -586,7 +535,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi /* Show the current clock value of PLL control register * (maybe already configured by the firmware) */ - pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL); + pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); PDPRINTK("pll_ctl[%X]\n", pll_ctl); #endif @@ -626,8 +575,8 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); - writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL); - readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */ + iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL); + ioread16(mmio_base + PDC_PLL_CTL); /* flush */ /* Wait the PLL circuit to be stable */ mdelay(30); @@ -637,7 +586,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi * Show the current clock value of PLL control register * (maybe configured by the firmware) */ - pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL); + pll_ctl = ioread16(mmio_base + PDC_PLL_CTL); PDPRINTK("pll_ctl[%X]\n", pll_ctl); #endif @@ -647,39 +596,47 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi /** * detect_pll_input_clock - Detect the PLL input clock in Hz. - * @probe_ent: for the port address + * @host: target ATA host * Ex. 16949000 on 33MHz PCI bus for pdc20275. * Half of the PCI clock. 
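 *
 * The frequency is measured rather than assumed: the counter runs in
 * test mode for roughly 100 ms, and the observed tick delta, scaled by
 * the elapsed time from do_gettimeofday(), gives the input clock in Hz.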
*/ -static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent) +static long pdc_detect_pll_input_clock(struct ata_host *host) { + void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; u32 scr; long start_count, end_count; - long pll_clock; - - /* Read current counter value */ - start_count = pdc_read_counter(probe_ent); + struct timeval start_time, end_time; + long pll_clock, usec_elapsed; /* Start the test mode */ - scr = readl(probe_ent->mmio_base + PDC_SYS_CTL); + scr = ioread32(mmio_base + PDC_SYS_CTL); PDPRINTK("scr[%X]\n", scr); - writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL); - readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */ + iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL); + ioread32(mmio_base + PDC_SYS_CTL); /* flush */ + + /* Read current counter value */ + start_count = pdc_read_counter(host); + do_gettimeofday(&start_time); /* Let the counter run for 100 ms. */ mdelay(100); /* Read the counter values again */ - end_count = pdc_read_counter(probe_ent); + end_count = pdc_read_counter(host); + do_gettimeofday(&end_time); /* Stop the test mode */ - scr = readl(probe_ent->mmio_base + PDC_SYS_CTL); + scr = ioread32(mmio_base + PDC_SYS_CTL); PDPRINTK("scr[%X]\n", scr); - writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL); - readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */ + iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL); + ioread32(mmio_base + PDC_SYS_CTL); /* flush */ /* calculate the input clock in Hz */ - pll_clock = (start_count - end_count) * 10; + usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + + (end_time.tv_usec - start_time.tv_usec); + + pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 * + (100000000 / usec_elapsed); PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count); PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock); @@ -689,11 +646,10 @@ static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent) /** * pdc_hardware_init - Initialize the hardware. - * @pdev: instance of pci_dev found - * @pdc_controller: controller specific information - * @pe: for the port address + * @host: target ATA host + * @board_idx: board identifier */ -static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, unsigned int board_idx) +static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) { long pll_clock; @@ -703,15 +659,12 @@ static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, uns * Ex. 25MHz or 40MHz, we have to adjust the cycle_time. * The pdc20275 controller employs PLL circuit to help correct timing registers setting. */ - pll_clock = pdc_detect_pll_input_clock(pe); - - if (pll_clock < 0) /* counter overflow? Try again. 
*/ - pll_clock = pdc_detect_pll_input_clock(pe); + pll_clock = pdc_detect_pll_input_clock(host); - dev_printk(KERN_INFO, &pdev->dev, "PLL input clock %ld kHz\n", pll_clock/1000); + dev_info(host->dev, "PLL input clock %ld kHz\n", pll_clock/1000); /* Adjust PLL control register */ - pdc_adjust_pll(pe, pll_clock, board_idx); + pdc_adjust_pll(host, pll_clock, board_idx); return 0; } @@ -721,7 +674,7 @@ static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, uns * @port: ata ioports to setup * @base: base address */ -static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) +static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = port->data_addr = base; @@ -743,125 +696,94 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) * Called when an instance of PCI adapter is inserted. * This function checks whether the hardware is supported, * initialize hardware and register an instance of ata_host to - * libata by providing struct ata_probe_ent and ata_device_add(). - * (implements struct pci_driver.probe() ) + * libata. (implements struct pci_driver.probe() ) * * @pdev: instance of pci_dev found * @ent: matching entry in the id_tbl[] */ -static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int pdc2027x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - static int printed_version; + static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 }; + static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 }; unsigned int board_idx = (unsigned int) ent->driver_data; - - struct ata_probe_ent *probe_ent = NULL; - unsigned long base; + const struct ata_port_info *ppi[] = + { &pdc2027x_port_info[board_idx], NULL }; + struct ata_host *host; void __iomem *mmio_base; - int rc; + int i, rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); + /* alloc host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) + return -ENOMEM; + + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); + rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME); if (rc) - goto err_out; + return rc; + host->iomap = pcim_iomap_table(pdev); rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) - goto err_out_regions; + return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) - goto err_out_regions; - - /* Prepare the probe entry */ - probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } - - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); - - mmio_base = pci_iomap(pdev, 5, 0); - if (!mmio_base) { - rc = -ENOMEM; - goto err_out_free_ent; - } - - base = (unsigned long) mmio_base; + return rc; - probe_ent->sht = pdc2027x_port_info[board_idx].sht; - probe_ent->port_flags = pdc2027x_port_info[board_idx].flags; - probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask; - probe_ent->mwdma_mask = pdc2027x_port_info[board_idx].mwdma_mask; - probe_ent->udma_mask = pdc2027x_port_info[board_idx].udma_mask; - probe_ent->port_ops = pdc2027x_port_info[board_idx].port_ops; + mmio_base = host->iomap[PDC_MMIO_BAR]; - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = SA_SHIRQ; - probe_ent->mmio_base = mmio_base; + for (i = 0; i < 2; i++) { + 
struct ata_port *ap = host->ports[i]; - pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0); - probe_ent->port[0].bmdma_addr = base + 0x1000; - pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0); - probe_ent->port[1].bmdma_addr = base + 0x1008; + pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]); + ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i]; - probe_ent->n_ports = 2; + ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd"); + } - pci_set_master(pdev); //pci_enable_intx(pdev); /* initialize adapter */ - if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0) - goto err_out_free_ent; + if (pdc_hardware_init(host, board_idx) != 0) + return -EIO; - ata_device_add(probe_ent); - kfree(probe_ent); - - return 0; - -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - pci_disable_device(pdev); - return rc; + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &pdc2027x_sht); } -/** - * pdc2027x_remove_one - Called to remove a single instance of the - * adapter. - * - * @dev: The PCI device to remove. - * FIXME: module load/unload not working yet - */ -static void __devexit pdc2027x_remove_one(struct pci_dev *pdev) +#ifdef CONFIG_PM_SLEEP +static int pdc2027x_reinit_one(struct pci_dev *pdev) { - ata_pci_remove_one(pdev); -} + struct ata_host *host = pci_get_drvdata(pdev); + unsigned int board_idx; + int rc; -/** - * pdc2027x_init - Called after this module is loaded into the kernel. - */ -static int __init pdc2027x_init(void) -{ - return pci_module_init(&pdc2027x_pci_driver); -} + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; -/** - * pdc2027x_exit - Called before this module unloaded from the kernel - */ -static void __exit pdc2027x_exit(void) -{ - pci_unregister_driver(&pdc2027x_pci_driver); + if (pdev->device == PCI_DEVICE_ID_PROMISE_20268 || + pdev->device == PCI_DEVICE_ID_PROMISE_20270) + board_idx = PDC_UDMA_100; + else + board_idx = PDC_UDMA_133; + + if (pdc_hardware_init(host, board_idx)) + return -EIO; + + ata_host_resume(host); + return 0; } +#endif -module_init(pdc2027x_init); -module_exit(pdc2027x_exit); +module_pci_driver(pdc2027x_pci_driver); diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 48f43432764..9001991d283 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -1,7 +1,8 @@ /* * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> + * (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz * * Based in part on linux/drivers/ide/pci/pdc202xx_old.c * @@ -14,56 +15,57 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_pdc202xx_old" -#define DRV_VERSION "0.2.1" +#define DRV_VERSION "0.4.3" -/** - * pdc2024x_pre_reset - probe begin - * @ap: ATA port - * - * Set up cable type and use generic probe init - */ - -static int pdc2024x_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - - -static void pdc2024x_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, pdc2024x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - - -static int pdc2026x_pre_reset(struct ata_port *ap) +static int 
pdc2026x_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u16 cis; pci_read_config_word(pdev, 0x50, &cis); if (cis & (1 << (10 + ap->port_no))) - ap->cbl = ATA_CBL_PATA80; - else - ap->cbl = ATA_CBL_PATA40; + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} + +static void pdc202xx_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - return ata_std_prereset(ap); + iowrite8(tf->command, ap->ioaddr.command_addr); + ndelay(400); } -static void pdc2026x_error_handler(struct ata_port *ap) +static bool pdc202xx_irq_check(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, pdc2026x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long master = pci_resource_start(pdev, 4); + u8 sc1d = inb(master + 0x1d); + + if (ap->port_no) { + /* + * bit 7: error, bit 6: interrupting, + * bit 5: FIFO full, bit 4: FIFO empty + */ + return sc1d & 0x40; + } else { + /* + * bit 3: error, bit 2: interrupting, + * bit 1: FIFO full, bit 0: FIFO empty + */ + return sc1d & 0x04; + } } /** - * pdc_configure_piomode - set chip PIO timing + * pdc202xx_configure_piomode - set chip PIO timing * @ap: ATA interface * @adev: ATA device * @pio: PIO mode @@ -73,10 +75,10 @@ static void pdc2026x_error_handler(struct ata_port *ap) * versa */ -static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) +static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; + int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; static u16 pio_timing[5] = { 0x0913, 0x050C , 0x0308, 0x0206, 0x0104 }; @@ -85,7 +87,7 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, pci_read_config_byte(pdev, port, &r_ap); pci_read_config_byte(pdev, port + 1, &r_bp); r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */ - r_bp &= ~0x07; + r_bp &= ~0x1F; r_ap |= (pio_timing[pio] >> 8); r_bp |= (pio_timing[pio] & 0xFF); @@ -98,7 +100,7 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, } /** - * pdc_set_piomode - set initial PIO mode data + * pdc202xx_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * @@ -106,13 +108,13 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, * but we want to set the PIO timing by default. */ -static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev) +static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev) { - pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); + pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); } /** - * pdc_configure_dmamode - set DMA mode in chip + * pdc202xx_configure_dmamode - set DMA mode in chip * @ap: ATA interface * @adev: ATA device * @@ -120,10 +122,10 @@ static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev) * to occur. 
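 *
 * The timing bytes live in PCI configuration space at 0x60 + 8 * port +
 * 4 * device; the BP/CP bytes written here overlap fields that the PIO
 * setup above also programs.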
*/ -static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) +static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; + int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; static u8 udma_timing[6][2] = { { 0x60, 0x03 }, /* 33 Mhz Clock */ { 0x40, 0x02 }, @@ -132,12 +134,17 @@ static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) { 0x20, 0x01 }, { 0x20, 0x01 } }; + static u8 mdma_timing[3][2] = { + { 0xe0, 0x0f }, + { 0x60, 0x04 }, + { 0x60, 0x03 }, + }; u8 r_bp, r_cp; pci_read_config_byte(pdev, port + 1, &r_bp); pci_read_config_byte(pdev, port + 2, &r_cp); - r_bp &= ~0xF0; + r_bp &= ~0xE0; r_cp &= ~0x0F; if (adev->dma_mode >= XFER_UDMA_0) { @@ -147,8 +154,8 @@ static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) } else { int speed = adev->dma_mode - XFER_MW_DMA_0; - r_bp |= 0x60; - r_cp |= (5 - speed); + r_bp |= mdma_timing[speed][0]; + r_cp |= mdma_timing[speed][1]; } pci_write_config_byte(pdev, port + 1, r_bp); pci_write_config_byte(pdev, port + 2, r_cp); @@ -161,6 +168,9 @@ static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) * * In UDMA3 or higher we have to clock switch for the duration of the * DMA transfer sequence. + * + * Note: The host lock held by the libata layer protects + * us from two channels both trying to set DMA bits at once */ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) @@ -170,36 +180,32 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) struct ata_taskfile *tf = &qc->tf; int sel66 = ap->port_no ? 0x08: 0x02; - unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr; - unsigned long clock = master + 0x11; - unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no); + void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr; + void __iomem *clock = master + 0x11; + void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no); u32 len; /* Check we keep host level locking here */ - if (adev->dma_mode >= XFER_UDMA_2) - outb(inb(clock) | sel66, clock); + if (adev->dma_mode > XFER_UDMA_2) + iowrite8(ioread8(clock) | sel66, clock); else - outb(inb(clock) & ~sel66, clock); + iowrite8(ioread8(clock) & ~sel66, clock); /* The DMA clocks may have been trashed by a reset. FIXME: make conditional and move to qc_issue ? */ - pdc_set_dmamode(ap, qc->dev); + pdc202xx_set_dmamode(ap, qc->dev); /* Cases the state machine will not complete correctly without help */ - if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) - { - if (tf->flags & ATA_TFLAG_LBA48) - len = qc->nsect * 512; - else - len = qc->nbytes; + if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATAPI_PROT_DMA) { + len = qc->nbytes / 2; if (tf->flags & ATA_TFLAG_WRITE) len |= 0x06000000; else len |= 0x05000000; - outl(len, atapi_reg); + iowrite32(len, atapi_reg); } /* Activate DMA */ @@ -212,6 +218,9 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) * * After a DMA completes we need to put the clock back to 33MHz for * PIO timings. + * + * Note: The host lock held by the libata layer protects + * us from two channels both trying to set DMA bits at once */ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) @@ -222,26 +231,24 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) int sel66 = ap->port_no ? 
0x08: 0x02; /* The clock bits are in the same register for both channels */ - unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr; - unsigned long clock = master + 0x11; - unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no); + void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr; + void __iomem *clock = master + 0x11; + void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no); /* Cases the state machine will not complete correctly */ - if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) { - outl(0, atapi_reg); - outb(inb(clock) & ~sel66, clock); + if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) { + iowrite32(0, atapi_reg); + iowrite8(ioread8(clock) & ~sel66, clock); } - /* Check we keep host level locking here */ /* Flip back to 33Mhz for PIO */ - if (adev->dma_mode >= XFER_UDMA_2) - outb(inb(clock) & ~sel66, clock); - + if (adev->dma_mode > XFER_UDMA_2) + iowrite8(ioread8(clock) & ~sel66, clock); ata_bmdma_stop(qc); + pdc202xx_set_piomode(ap, adev); } /** * pdc2026x_dev_config - device setup hook - * @ap: ATA port * @adev: newly found device * * Perform chip specific early setup. We need to lock the transfer @@ -249,175 +256,137 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) * barf. */ -static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev) +static void pdc2026x_dev_config(struct ata_device *adev) { adev->max_sectors = 256; } -static struct scsi_host_template pdc_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, +static int pdc2026x_port_start(struct ata_port *ap) +{ + void __iomem *bmdma = ap->ioaddr.bmdma_addr; + if (bmdma) { + /* Enable burst mode */ + u8 burst = ioread8(bmdma + 0x1f); + iowrite8(burst | 0x01, bmdma + 0x1f); + } + return ata_bmdma_port_start(ap); +} + +/** + * pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command + * @qc: Metadata associated with taskfile to check + * + * Just say no - not supported on older Promise. + * + * LOCKING: + * None (inherited from caller). 
+ * + * RETURNS: 0 when ATAPI DMA can be used + * 1 otherwise + */ + +static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc) +{ + return 1; +} + +static struct scsi_host_template pdc202xx_sht = { + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations pdc2024x_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = pdc_set_piomode, - .set_dmamode = pdc_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = pdc2024x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &ata_bmdma_port_ops, + + .cable_detect = ata_cable_40wire, + .set_piomode = pdc202xx_set_piomode, + .set_dmamode = pdc202xx_set_dmamode, + + .sff_exec_command = pdc202xx_exec_command, + .sff_irq_check = pdc202xx_irq_check, }; static struct ata_port_operations pdc2026x_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = pdc_set_piomode, - .set_dmamode = pdc_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .dev_config = pdc2026x_dev_config, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = pdc2026x_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = pdc2026x_bmdma_start, - .bmdma_stop = pdc2026x_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &pdc2024x_port_ops, + + .check_atapi_dma = pdc2026x_check_atapi_dma, + .bmdma_start = pdc2026x_bmdma_start, + .bmdma_stop = pdc2026x_bmdma_stop, + + .cable_detect = pdc2026x_cable_detect, + .dev_config = pdc2026x_dev_config, + + .port_start = pdc2026x_port_start, + + .sff_exec_command = pdc202xx_exec_command, + .sff_irq_check = pdc202xx_irq_check, }; -static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info[3] = { + static const struct ata_port_info info[3] = { { - .sht = &pdc_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &pdc2024x_port_ops }, { - .sht = &pdc_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &pdc2026x_port_ops }, { - .sht = &pdc_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 
0x1f, - .mwdma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &pdc2026x_port_ops } }; - static struct ata_port_info *port_info[2]; - - port_info[0] = port_info[1] = &info[id->driver_data]; + const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; if (dev->device == PCI_DEVICE_ID_PROMISE_20265) { struct pci_dev *bridge = dev->bus->self; /* Don't grab anything behind a Promise I2O RAID */ if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) { - if( bridge->device == PCI_DEVICE_ID_INTEL_I960) + if (bridge->device == PCI_DEVICE_ID_INTEL_I960) return -ENODEV; - if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM) + if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM) return -ENODEV; } } - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); } -static struct pci_device_id pdc[] = { - { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0}, - { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1}, - { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1}, - { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2}, - { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2}, - { 0, }, -}; +static const struct pci_device_id pdc202xx[] = { + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 }, + { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 }, -static struct pci_driver pdc_pci_driver = { - .name = DRV_NAME, - .id_table = pdc, - .probe = pdc_init_one, - .remove = ata_pci_remove_one + { }, }; -static int __init pdc_init(void) -{ - return pci_register_driver(&pdc_pci_driver); -} - - -static void __exit pdc_exit(void) -{ - pci_unregister_driver(&pdc_pci_driver); -} +static struct pci_driver pdc202xx_pci_driver = { + .name = DRV_NAME, + .id_table = pdc202xx, + .probe = pdc202xx_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; +module_pci_driver(pdc202xx_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267"); MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, pdc); +MODULE_DEVICE_TABLE(pci, pdc202xx); MODULE_VERSION(DRV_VERSION); - -module_init(pdc_init); -module_exit(pdc_exit); diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c new file mode 100644 index 00000000000..35cb0e26323 --- /dev/null +++ b/drivers/ata/pata_piccolo.c @@ -0,0 +1,125 @@ +/* + * pata_piccolo.c - Toshiba Piccolo PATA/SATA controller driver. + * + * This is basically an update to ata_generic.c to add Toshiba Piccolo support + * then split out to keep ata_generic "clean". + * + * Copyright 2005 Red Hat Inc, all rights reserved. 
+ *
+ * Elements from ide/pci/generic.c
+ * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
+ * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * The timing data tables/programming info are courtesy of the NetBSD driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_piccolo"
+#define DRV_VERSION "0.0.1"
+
+
+
+static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 pio[6] = {	/* For reg 0x50 low word & E088 */
+		0x0566, 0x0433, 0x0311, 0x0201, 0x0200, 0x0100
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 conf;
+	pci_read_config_word(pdev, 0x50, &conf);
+	conf &= 0xE088;
+	conf |= pio[adev->pio_mode - XFER_PIO_0];
+	pci_write_config_word(pdev, 0x50, conf);
+}
+
+static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 conf;
+	pci_read_config_dword(pdev, 0x5C, &conf);
+	conf &= 0x78FFE088;	/* Keep the other bits */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int udma = adev->dma_mode - XFER_UDMA_0;
+		conf |= 0x80000000;
+		conf |= (udma + 2) << 28;
+		conf |= (2 - udma) * 0x111;	/* spread into three nibbles */
+	} else {
+		static const u32 mwdma[4] = {
+			0x0655, 0x0200, 0x0200, 0x0100
+		};
+		conf |= mwdma[adev->dma_mode - XFER_MW_DMA_0];
+	}
+	pci_write_config_dword(pdev, 0x5C, conf);
+}
+
+
+static struct scsi_host_template tosh_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations tosh_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= tosh_set_piomode,
+	.set_dmamode	= tosh_set_dmamode
+};
+
+/**
+ * ata_tosh_init_one - attach generic IDE
+ * @dev: PCI device found
+ * @id: match entry
+ *
+ * Called each time a matching IDE interface is found. We check if the
+ * interface is one we wish to claim and if so we perform any chip
+ * specific hacks then let the ATA layer do the heavy lifting.
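+ *
+ * Only the primary channel is wired up; the second port slot is filled
+ * with ata_dummy_port_info.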
+ */ + +static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO5, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, + .port_ops = &tosh_port_ops + }; + const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; + /* Just one port for the moment */ + return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); +} + +static struct pci_device_id ata_tosh[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, + { 0, }, +}; + +static struct pci_driver ata_tosh_pci_driver = { + .name = DRV_NAME, + .id_table = ata_tosh, + .probe = ata_tosh_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(ata_tosh_pci_driver); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, ata_tosh); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c new file mode 100644 index 00000000000..a5579b55e33 --- /dev/null +++ b/drivers/ata/pata_platform.c @@ -0,0 +1,241 @@ +/* + * Generic platform device PATA driver + * + * Copyright (C) 2006 - 2007 Paul Mundt + * + * Based on pata_pcmcia: + * + * Copyright 2005-2006 Red Hat Inc, all rights reserved. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <scsi/scsi_host.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> + +#define DRV_NAME "pata_platform" +#define DRV_VERSION "1.2" + +static int pio_mask = 1; + +/* + * Provide our own set_mode() as we don't want to change anything that has + * already been configured.. 
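+ *
+ * Each enabled device is simply reported back as configured for PIO0;
+ * no controller timing registers are touched.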
+ */ +static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused) +{ + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + /* We don't really care */ + dev->pio_mode = dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + ata_dev_info(dev, "configured for PIO\n"); + } + return 0; +} + +static struct scsi_host_template pata_platform_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations pata_platform_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_unknown, + .set_mode = pata_platform_set_mode, +}; + +static void pata_platform_setup_port(struct ata_ioports *ioaddr, + unsigned int shift) +{ + /* Fixup the port shift for platforms that need it */ + ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift); + ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift); + ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift); + ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift); + ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift); + ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift); + ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift); + ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift); + ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift); + ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift); +} + +/** + * __pata_platform_probe - attach a platform interface + * @dev: device + * @io_res: Resource representing I/O base + * @ctl_res: Resource representing CTL base + * @irq_res: Resource representing IRQ and its flags + * @ioport_shift: I/O port shift + * @__pio_mask: PIO mask + * + * Register a platform bus IDE interface. Such interfaces are PIO and we + * assume do not support IRQ sharing. + * + * Platform devices are expected to contain at least 2 resources per port: + * + * - I/O Base (IORESOURCE_IO or IORESOURCE_MEM) + * - CTL Base (IORESOURCE_IO or IORESOURCE_MEM) + * + * and optionally: + * + * - IRQ (IORESOURCE_IRQ) + * + * If the base resources are both mem types, the ioremap() is handled + * here. For IORESOURCE_IO, it's assumed that there's no remapping + * necessary. + * + * If no IRQ resource is present, PIO polling mode is used instead. + */ +int __pata_platform_probe(struct device *dev, struct resource *io_res, + struct resource *ctl_res, struct resource *irq_res, + unsigned int ioport_shift, int __pio_mask) +{ + struct ata_host *host; + struct ata_port *ap; + unsigned int mmio; + int irq = 0; + int irq_flags = 0; + + /* + * Check for MMIO + */ + mmio = (( io_res->flags == IORESOURCE_MEM) && + (ctl_res->flags == IORESOURCE_MEM)); + + /* + * And the IRQ + */ + if (irq_res && irq_res->start > 0) { + irq = irq_res->start; + irq_flags = irq_res->flags; + } + + /* + * Now that that's out of the way, wire up the port.. 
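+ * A single-port host is allocated and the cmd/ctl bases are mapped with
+ * the devm_* helpers, so the error paths need no explicit unwinding.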
+ */ + host = ata_host_alloc(dev, 1); + if (!host) + return -ENOMEM; + ap = host->ports[0]; + + ap->ops = &pata_platform_port_ops; + ap->pio_mask = __pio_mask; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + /* + * Use polling mode if there's no IRQ + */ + if (!irq) { + ap->flags |= ATA_FLAG_PIO_POLLING; + ata_port_desc(ap, "no IRQ, using PIO polling"); + } + + /* + * Handle the MMIO case + */ + if (mmio) { + ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, + resource_size(io_res)); + ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, + resource_size(ctl_res)); + } else { + ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, + resource_size(io_res)); + ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, + resource_size(ctl_res)); + } + if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) { + dev_err(dev, "failed to map IO/CTL base\n"); + return -ENOMEM; + } + + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + + pata_platform_setup_port(&ap->ioaddr, ioport_shift); + + ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport", + (unsigned long long)io_res->start, + (unsigned long long)ctl_res->start); + + /* activate */ + return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL, + irq_flags, &pata_platform_sht); +} +EXPORT_SYMBOL_GPL(__pata_platform_probe); + +static int pata_platform_probe(struct platform_device *pdev) +{ + struct resource *io_res; + struct resource *ctl_res; + struct resource *irq_res; + struct pata_platform_info *pp_info = dev_get_platdata(&pdev->dev); + + /* + * Simple resource validation .. + */ + if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + /* + * Get the I/O base first + */ + io_res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (io_res == NULL) { + io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(io_res == NULL)) + return -EINVAL; + } + + /* + * Then the CTL base + */ + ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1); + if (ctl_res == NULL) { + ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (unlikely(ctl_res == NULL)) + return -EINVAL; + } + + /* + * And the IRQ + */ + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (irq_res) + irq_res->flags = pp_info ? pp_info->irq_flags : 0; + + return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res, + pp_info ? pp_info->ioport_shift : 0, + pio_mask); +} + +static struct platform_driver pata_platform_driver = { + .probe = pata_platform_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(pata_platform_driver); + +module_param(pio_mask, int, 0); + +MODULE_AUTHOR("Paul Mundt"); +MODULE_DESCRIPTION("low-level driver for platform device ATA"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c new file mode 100644 index 00000000000..73259bfda1e --- /dev/null +++ b/drivers/ata/pata_pxa.c @@ -0,0 +1,398 @@ +/* + * Generic PXA PATA driver + * + * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_host.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <linux/platform_data/ata-pxa.h>
+#include <mach/dma.h>
+
+#define DRV_NAME	"pata_pxa"
+#define DRV_VERSION	"0.1"
+
+struct pata_pxa_data {
+	uint32_t		dma_channel;
+	struct pxa_dma_desc	*dma_desc;
+	dma_addr_t		dma_desc_addr;
+	uint32_t		dma_desc_id;
+
+	/* DMA IO physical address */
+	uint32_t		dma_io_addr;
+	/* PXA DREQ<0:2> pin selector */
+	uint32_t		dma_dreq;
+	/* DMA DCSR register value */
+	uint32_t		dma_dcsr;
+
+	struct completion	dma_done;
+};
+
+/*
+ * Set up the DMA descriptors. The transfer size is capped at 4k per
+ * descriptor; longer transfers are split into multiple chained descriptors.
+ */
+static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+
+	uint32_t cpu_len, seg_len;
+	dma_addr_t cpu_addr;
+
+	cpu_addr = sg_dma_address(sg);
+	cpu_len = sg_dma_len(sg);
+
+	do {
+		seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;
+
+		pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
+			((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));
+
+		pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
+			DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
+			pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
+			pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
+						DCMD_FLOWTRG;
+		} else {
+			pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
+			pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
+			pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
+						DCMD_FLOWSRC;
+		}
+
+		cpu_len -= seg_len;
+		cpu_addr += seg_len;
+		pd->dma_desc_id++;
+
+	} while (cpu_len);
+
+	/* Segments not a multiple of 32 bytes should not happen; if one
+	 * does, switch this channel to byte-aligned DMA. */
+	if (seg_len & 0x1f)
+		DALGN |= (1 << pd->dma_dreq);
+}
+
+/*
+ * Prepare taskfile for submission.
+ */
+static void pxa_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+	int si = 0;
+	struct scatterlist *sg;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	pd->dma_desc_id = 0;
+
+	DCSR(pd->dma_channel) = 0;
+	DALGN &= ~(1 << pd->dma_dreq);
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si)
+		pxa_load_dmac(sg, qc);
+
+	pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;
+
+	/* Fire IRQ only at the end of the last block */
+	pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;
+
+	DDADR(pd->dma_channel) = pd->dma_desc_addr;
+	DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
+
+}
+
+/*
+ * Configure the DMA controller and load the DMA descriptors, but don't
+ * start the DMA controller yet. Only issue the ATA command.
+ */
+static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
+}
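Before the transfer is started, pxa_qc_prep() above walks the scatterlist and pxa_load_dmac() expands each entry into one descriptor per 4 KiB. A standalone sketch of that accounting (the helper name is illustrative; only the 0x1000 cap comes from the code above):

    #include <linux/kernel.h>	/* DIV_ROUND_UP() */

    /*
     * Number of chained PXA DMA descriptors a single scatterlist entry
     * consumes, given the 4 KiB-per-descriptor cap in pxa_load_dmac().
     */
    static unsigned int pxa_sg_desc_count(unsigned int sg_len)
    {
    	return DIV_ROUND_UP(sg_len, 0x1000);
    }

A 64 KiB segment therefore consumes 16 descriptors; with the single PAGE_SIZE descriptor table allocated in the probe routine further down and the 16-byte struct pxa_dma_desc, that table holds 256 descriptors on 4 KiB pages, which bounds how much one request may queue.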
+/*
+ * Execute the DMA transfer.
+ */
+static void pxa_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+	init_completion(&pd->dma_done);
+	DCSR(pd->dma_channel) = DCSR_RUN;
+}
+
+/*
+ * Wait until the DMA transfer completes, then stop the DMA controller.
+ */
+static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+
+	/* wait_for_completion_timeout() returns 0 on timeout, so the
+	 * error path must trigger on the negated result */
+	if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
+	    !wait_for_completion_timeout(&pd->dma_done, HZ))
+		dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");
+
+	DCSR(pd->dma_channel) = 0;
+}
+
+/*
+ * Read DMA status. The bmdma_stop() will take care of properly finishing
+ * the DMA transfer, so we always have a DMA-complete interrupt here.
+ */
+static unsigned char pxa_bmdma_status(struct ata_port *ap)
+{
+	struct pata_pxa_data *pd = ap->private_data;
+	unsigned char ret = ATA_DMA_INTR;
+
+	if (pd->dma_dcsr & DCSR_BUSERR)
+		ret |= ATA_DMA_ERR;
+
+	return ret;
+}
+
+/*
+ * No IRQ register present, so we do nothing.
+ */
+static void pxa_irq_clear(struct ata_port *ap)
+{
+}
+
+/*
+ * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
+ * unclear why ATAPI has DMA issues.
+ */
+static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct scsi_host_template pxa_ata_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pxa_ata_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= pxa_bmdma_setup,
+	.bmdma_start		= pxa_bmdma_start,
+	.bmdma_stop		= pxa_bmdma_stop,
+	.bmdma_status		= pxa_bmdma_status,
+
+	.check_atapi_dma	= pxa_check_atapi_dma,
+
+	.sff_irq_clear		= pxa_irq_clear,
+
+	.qc_prep		= pxa_qc_prep,
+};
+
+/*
+ * DMA interrupt handler.
+ */
+static void pxa_ata_dma_irq(int dma, void *port)
+{
+	struct ata_port *ap = port;
+	struct pata_pxa_data *pd = ap->private_data;
+
+	pd->dma_dcsr = DCSR(dma);
+	DCSR(dma) = pd->dma_dcsr;
+
+	if (pd->dma_dcsr & DCSR_STOPSTATE)
+		complete(&pd->dma_done);
+}
+
+static int pxa_ata_probe(struct platform_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct pata_pxa_data *data;
+	struct resource *cmd_res;
+	struct resource *ctl_res;
+	struct resource *dma_res;
+	struct resource *irq_res;
+	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
+	int ret = 0;
+
+	/*
+	 * Resource validation; four resources are needed:
+	 * - CMD port base address
+	 * - CTL port base address
+	 * - DMA port base address
+	 * - IRQ pin
+	 */
+	if (pdev->num_resources != 4) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * CMD port base address
+	 */
+	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(cmd_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * CTL port base address
+	 */
+	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (unlikely(ctl_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * DMA port base address
+	 */
+	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (unlikely(dma_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * IRQ pin
+	 */
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (unlikely(irq_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * Allocate the host
+	 */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+	ap->ops = &pxa_ata_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->mwdma_mask = ATA_MWDMA2;
+
+	ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
+					   resource_size(cmd_res));
+	ap->ioaddr.ctl_addr = 
devm_ioremap(&pdev->dev, ctl_res->start, + resource_size(ctl_res)); + ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, + resource_size(dma_res)); + + /* + * Adjust register offsets + */ + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + ap->ioaddr.data_addr = ap->ioaddr.cmd_addr + + (ATA_REG_DATA << pdata->reg_shift); + ap->ioaddr.error_addr = ap->ioaddr.cmd_addr + + (ATA_REG_ERR << pdata->reg_shift); + ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr + + (ATA_REG_FEATURE << pdata->reg_shift); + ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr + + (ATA_REG_NSECT << pdata->reg_shift); + ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAL << pdata->reg_shift); + ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAM << pdata->reg_shift); + ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAH << pdata->reg_shift); + ap->ioaddr.device_addr = ap->ioaddr.cmd_addr + + (ATA_REG_DEVICE << pdata->reg_shift); + ap->ioaddr.status_addr = ap->ioaddr.cmd_addr + + (ATA_REG_STATUS << pdata->reg_shift); + ap->ioaddr.command_addr = ap->ioaddr.cmd_addr + + (ATA_REG_CMD << pdata->reg_shift); + + /* + * Allocate and load driver's internal data structure + */ + data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + ap->private_data = data; + data->dma_dreq = pdata->dma_dreq; + data->dma_io_addr = dma_res->start; + + /* + * Allocate space for the DMA descriptors + */ + data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, + &data->dma_desc_addr, GFP_KERNEL); + if (!data->dma_desc) + return -EINVAL; + + /* + * Request the DMA channel + */ + data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW, + pxa_ata_dma_irq, ap); + if (data->dma_channel < 0) + return -EBUSY; + + /* + * Stop and clear the DMA channel + */ + DCSR(data->dma_channel) = 0; + + /* + * Activate the ATA host + */ + ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt, + pdata->irq_flags, &pxa_ata_sht); + if (ret) + pxa_free_dma(data->dma_channel); + + return ret; +} + +static int pxa_ata_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct pata_pxa_data *data = host->ports[0]->private_data; + + pxa_free_dma(data->dma_channel); + + ata_host_detach(host); + + return 0; +} + +static struct platform_driver pxa_ata_driver = { + .probe = pxa_ata_probe, + .remove = pxa_ata_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(pxa_ata_driver); + +MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); +MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c deleted file mode 100644 index 7977f471d5e..00000000000 --- a/drivers/ata/pata_qdi.c +++ /dev/null @@ -1,403 +0,0 @@ -/* - * pata_qdi.c - QDI VLB ATA controllers - * (C) 2006 Red Hat <alan@redhat.com> - * - * This driver mostly exists as a proof of concept for non PCI devices under - * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly - * useful. 
- * - * Tuning code written from the documentation at - * http://www.ryston.cz/petr/vlb/qd6500.html - * http://www.ryston.cz/petr/vlb/qd6580.html - * - * Probe code based on drivers/ide/legacy/qd65xx.c - * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by - * Samuel Thibault <samuel.thibault@fnac.net> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/blkdev.h> -#include <linux/delay.h> -#include <scsi/scsi_host.h> -#include <linux/libata.h> -#include <linux/platform_device.h> - -#define DRV_NAME "pata_qdi" -#define DRV_VERSION "0.2.4" - -#define NR_HOST 4 /* Two 6580s */ - -struct qdi_data { - unsigned long timing; - u8 clock[2]; - u8 last; - int fast; - struct platform_device *platform_dev; - -}; - -static struct ata_host *qdi_host[NR_HOST]; -static struct qdi_data qdi_data[NR_HOST]; -static int nr_qdi_host; - -#ifdef MODULE -static int probe_qdi = 1; -#else -static int probe_qdi; -#endif - -static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) -{ - struct ata_timing t; - struct qdi_data *qdi = ap->host->private_data; - int active, recovery; - u8 timing; - - /* Get the timing data in cycles */ - ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); - - if (qdi->fast) { - active = 8 - FIT(t.active, 1, 8); - recovery = 18 - FIT(t.recover, 3, 18); - } else { - active = 9 - FIT(t.active, 2, 9); - recovery = 15 - FIT(t.recover, 0, 15); - } - timing = (recovery << 4) | active | 0x08; - - qdi->clock[adev->devno] = timing; - - outb(timing, qdi->timing); -} - -static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev) -{ - struct ata_timing t; - struct qdi_data *qdi = ap->host->private_data; - int active, recovery; - u8 timing; - - /* Get the timing data in cycles */ - ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); - - if (qdi->fast) { - active = 8 - FIT(t.active, 1, 8); - recovery = 18 - FIT(t.recover, 3, 18); - } else { - active = 9 - FIT(t.active, 2, 9); - recovery = 15 - FIT(t.recover, 0, 15); - } - timing = (recovery << 4) | active | 0x08; - - qdi->clock[adev->devno] = timing; - - outb(timing, qdi->timing); - - /* Clear the FIFO */ - if (adev->class != ATA_DEV_ATA) - outb(0x5F, (qdi->timing & 0xFFF0) + 3); -} - -/** - * qdi_qc_issue_prot - command issue - * @qc: command pending - * - * Called when the libata layer is about to issue a command. We wrap - * this interface so that we can load the correct ATA timings. 
- */ - -static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct ata_device *adev = qc->dev; - struct qdi_data *qdi = ap->host->private_data; - - if (qdi->clock[adev->devno] != qdi->last) { - if (adev->pio_mode) { - qdi->last = qdi->clock[adev->devno]; - outb(qdi->clock[adev->devno], qdi->timing); - } - } - return ata_qc_issue_prot(qc); -} - -static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) -{ - struct ata_port *ap = adev->ap; - int slop = buflen & 3; - - if (ata_id_has_dword_io(adev->id)) { - if (write_data) - outsl(ap->ioaddr.data_addr, buf, buflen >> 2); - else - insl(ap->ioaddr.data_addr, buf, buflen >> 2); - - if (unlikely(slop)) { - u32 pad; - if (write_data) { - memcpy(&pad, buf + buflen - slop, slop); - outl(le32_to_cpu(pad), ap->ioaddr.data_addr); - } else { - pad = cpu_to_le16(inl(ap->ioaddr.data_addr)); - memcpy(buf + buflen - slop, &pad, slop); - } - } - } else - ata_pio_data_xfer(adev, buf, buflen, write_data); -} - -static struct scsi_host_template qdi_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, -}; - -static struct ata_port_operations qdi6500_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = qdi6500_set_piomode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = qdi_qc_issue_prot, - - .data_xfer = qdi_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; - -static struct ata_port_operations qdi6580_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = qdi6580_set_piomode, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .qc_prep = ata_qc_prep, - .qc_issue = qdi_qc_issue_prot, - - .data_xfer = qdi_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; - -/** - * qdi_init_one - attach a qdi interface - * @type: Type to display - * @io: I/O port start - * @irq: interrupt line - * @fast: True if on a > 33Mhz VLB - * - * Register an ISA bus IDE interface. Such interfaces are PIO and we - * assume do not support IRQ sharing. 
- */ - -static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast) -{ - struct ata_probe_ent ae; - struct platform_device *pdev; - int ret; - - unsigned long ctrl = io + 0x206; - - /* - * Fill in a probe structure first of all - */ - - pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0); - if (pdev == NULL) - return -ENOMEM; - - memset(&ae, 0, sizeof(struct ata_probe_ent)); - INIT_LIST_HEAD(&ae.node); - ae.dev = &pdev->dev; - - if (type == 6580) { - ae.port_ops = &qdi6580_port_ops; - ae.pio_mask = 0x1F; - } else { - ae.port_ops = &qdi6500_port_ops; - ae.pio_mask = 0x07; /* Actually PIO3 !IORDY is possible */ - } - - ae.sht = &qdi_sht; - ae.n_ports = 1; - ae.irq = irq; - ae.irq_flags = 0; - ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; - ae.port[0].cmd_addr = io; - ae.port[0].altstatus_addr = ctrl; - ae.port[0].ctl_addr = ctrl; - ata_std_ports(&ae.port[0]); - - /* - * Hook in a private data structure per channel - */ - ae.private_data = &qdi_data[nr_qdi_host]; - - qdi_data[nr_qdi_host].timing = port; - qdi_data[nr_qdi_host].fast = fast; - qdi_data[nr_qdi_host].platform_dev = pdev; - - printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io); - ret = ata_device_add(&ae); - if (ret == 0) { - platform_device_unregister(pdev); - return -ENODEV; - } - - qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev); - return 0; -} - -/** - * qdi_init - attach qdi interfaces - * - * Attach qdi IDE interfaces by scanning the ports it may occupy. - */ - -static __init int qdi_init(void) -{ - unsigned long flags; - static const unsigned long qd_port[2] = { 0x30, 0xB0 }; - static const unsigned long ide_port[2] = { 0x170, 0x1F0 }; - static const int ide_irq[2] = { 14, 15 }; - - int ct = 0; - int i; - - if (probe_qdi == 0) - return -ENODEV; - - /* - * Check each possible QD65xx base address - */ - - for (i = 0; i < 2; i++) { - unsigned long port = qd_port[i]; - u8 r, res; - - - if (request_region(port, 2, "pata_qdi")) { - /* Check for a card */ - local_irq_save(flags); - r = inb_p(port); - outb_p(0x19, port); - res = inb_p(port); - outb_p(r, port); - local_irq_restore(flags); - - /* Fail */ - if (res == 0x19) - { - release_region(port, 2); - continue; - } - - /* Passes the presence test */ - r = inb_p(port + 1); /* Check port agrees with port set */ - if ((r & 2) >> 1 != i) { - release_region(port, 2); - continue; - } - - /* Check card type */ - if ((r & 0xF0) == 0xC0) { - /* QD6500: single channel */ - if (r & 8) { - /* Disabled ? */ - release_region(port, 2); - continue; - } - ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04); - } - if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) { - /* QD6580: dual channel */ - if (!request_region(port + 2 , 2, "pata_qdi")) - { - release_region(port, 2); - continue; - } - res = inb(port + 3); - if (res & 1) { - /* Single channel mode */ - ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04); - } else { - /* Dual channel mode */ - ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04); - ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04); - } - } - } - } - if (ct != 0) - return 0; - return -ENODEV; -} - -static __exit void qdi_exit(void) -{ - int i; - - for (i = 0; i < nr_qdi_host; i++) { - ata_host_remove(qdi_host[i]); - /* Free the control resource. The 6580 dual channel has the resources - * claimed as a pair of 2 byte resources so we need no special cases... 
- */ - release_region(qdi_data[i].timing, 2); - platform_device_unregister(qdi_data[i].platform_dev); - } -} - -MODULE_AUTHOR("Alan Cox"); -MODULE_DESCRIPTION("low-level driver for qdi ATA"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -module_init(qdi_init); -module_exit(qdi_exit); - -module_param(probe_qdi, int, 0); - diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index c20bcf43ed6..a3f1123d17a 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c @@ -1,7 +1,7 @@ /* * pata_radisys.c - Intel PATA/SATA controllers * - * (C) 2006 Red Hat <alan@redhat.com> + * (C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk> * * Some parts based on ata_piix.c by Jeff Garzik and others. * @@ -15,7 +15,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> @@ -24,40 +23,12 @@ #include <linux/ata.h> #define DRV_NAME "pata_radisys" -#define DRV_VERSION "0.4.1" - -/** - * radisys_probe_init - probe begin - * @ap: ATA port - * - * Set up cable type and use generic probe init - */ - -static int radisys_pre_reset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA80; - return ata_std_prereset(ap); -} - - -/** - * radisys_pata_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe - * @classes: - * - * LOCKING: - * None (inherited from caller). - */ - -static void radisys_pata_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, radisys_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} +#define DRV_VERSION "0.4.4" /** * radisys_set_piomode - Initialize host controller PATA PIO timings - * @ap: Port whose timings we are configuring - * @adev: um + * @ap: ATA port + * @adev: Device whose timings we are configuring * * Set PIO mode for device, in host controller PCI config space. * @@ -109,7 +80,6 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev) * radisys_set_dmamode - Initialize host controller PATA DMA timings * @ap: Port whose timings we are configuring * @adev: Device to program - * @isich: True if the device is an ICH and has IOCFG registers * * Set MWDMA mode for device, in host controller PCI config space. * @@ -168,9 +138,9 @@ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) pci_read_config_byte(dev, 0x4A, &udma_mode); if (adev->xfer_mode == XFER_UDMA_2) - udma_mode &= ~ (1 << adev->devno); + udma_mode &= ~(2 << (adev->devno * 4)); else /* UDMA 4 */ - udma_mode |= (1 << adev->devno); + udma_mode |= (2 << (adev->devno * 4)); pci_write_config_byte(dev, 0x4A, udma_mode); @@ -184,17 +154,17 @@ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) } /** - * radisys_qc_issue_prot - command issue + * radisys_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if - * neccessary. Our logic also clears TIME0/TIME1 for the other device so + * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. 
*/ -static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; @@ -208,59 +178,20 @@ static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc) radisys_set_piomode(ap, adev); } } - return ata_qc_issue_prot(qc); + return ata_bmdma_qc_issue(qc); } static struct scsi_host_template radisys_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations radisys_pata_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations radisys_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_issue = radisys_qc_issue, + .cable_detect = ata_cable_unknown, .set_piomode = radisys_set_piomode, .set_dmamode = radisys_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = radisys_pata_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = radisys_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, }; @@ -281,26 +212,23 @@ static const struct ata_port_operations radisys_pata_ops = { static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - static struct ata_port_info info = { - .sht = &radisys_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma1-2 */ - .udma_mask = 0x14, /* UDMA33/66 only */ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA12_ONLY, + .udma_mask = ATA_UDMA24_ONLY, .port_ops = &radisys_pata_ops, }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *ppi[] = { &info, NULL }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); } static const struct pci_device_id radisys_pci_tbl[] = { - { 0x1331, 0x8201, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VDEVICE(RADISYS, 0x8201), }, + { } /* terminate list */ }; @@ -309,25 +237,16 @@ static struct pci_driver radisys_pci_driver = { .id_table = radisys_pci_tbl, .probe = radisys_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init radisys_init(void) -{ - return 
pci_register_driver(&radisys_pci_driver); -} - -static void __exit radisys_exit(void) -{ - pci_unregister_driver(&radisys_pci_driver); -} - - -module_init(radisys_init); -module_exit(radisys_exit); +module_pci_driver(radisys_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, radisys_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c new file mode 100644 index 00000000000..3c5eb8fa6bd --- /dev/null +++ b/drivers/ata/pata_rb532_cf.c @@ -0,0 +1,209 @@ +/* + * A low-level PATA driver to handle a Compact Flash connected on the + * Mikrotik's RouterBoard 532 board. + * + * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org> + * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> + * + * This file was based on: drivers/ata/pata_ixp4xx_cf.c + * Copyright (C) 2006-07 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * Also was based on the driver for Linux 2.4.xx published by Mikrotik for + * their RouterBoard 1xx and 5xx series devices. The original Mikrotik code + * seems not to have a license. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include <linux/libata.h> +#include <scsi/scsi_host.h> + +#include <asm/gpio.h> + +#define DRV_NAME "pata-rb532-cf" +#define DRV_VERSION "0.1.0" +#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" + +#define RB500_CF_MAXPORTS 1 +#define RB500_CF_IO_DELAY 400 + +#define RB500_CF_REG_BASE 0x0800 +#define RB500_CF_REG_ERR 0x080D +#define RB500_CF_REG_CTRL 0x080E +/* 32bit buffered data register offset */ +#define RB500_CF_REG_DBUF32 0x0C00 + +struct rb532_cf_info { + void __iomem *iobase; + unsigned int gpio_line; + unsigned int irq; +}; + +/* ------------------------------------------------------------------------ */ + +static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance) +{ + struct ata_host *ah = dev_instance; + struct rb532_cf_info *info = ah->private_data; + + if (gpio_get_value(info->gpio_line)) { + irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); + ata_sff_interrupt(info->irq, dev_instance); + } else { + irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); + } + + return IRQ_HANDLED; +} + +static struct ata_port_operations rb532_pata_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer32, +}; + +/* ------------------------------------------------------------------------ */ + +static struct scsi_host_template rb532_pata_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +/* ------------------------------------------------------------------------ */ + +static void rb532_pata_setup_ports(struct ata_host *ah) +{ + struct rb532_cf_info *info = ah->private_data; + struct ata_port *ap; + + ap = ah->ports[0]; + + ap->ops = &rb532_pata_port_ops; + ap->pio_mask = ATA_PIO4; + + ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE; + ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; + ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; + + ata_sff_std_ports(&ap->ioaddr); + + ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32; + 
ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR; +} + +static int rb532_pata_driver_probe(struct platform_device *pdev) +{ + int irq; + int gpio; + struct resource *res; + struct ata_host *ah; + struct rb532_cf_info *info; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no IOMEM resource found\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + return -ENOENT; + } + + gpio = irq_to_gpio(irq); + if (gpio < 0) { + dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); + return -ENOENT; + } + + ret = gpio_request(gpio, DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "GPIO request failed\n"); + return ret; + } + + /* allocate host */ + ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS); + if (!ah) + return -ENOMEM; + + platform_set_drvdata(pdev, ah); + + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ah->private_data = info; + info->gpio_line = gpio; + info->irq = irq; + + info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!info->iobase) + return -ENOMEM; + + ret = gpio_direction_input(gpio); + if (ret) { + dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n", + ret); + goto err_free_gpio; + } + + rb532_pata_setup_ports(ah); + + ret = ata_host_activate(ah, irq, rb532_pata_irq_handler, + IRQF_TRIGGER_LOW, &rb532_pata_sht); + if (ret) + goto err_free_gpio; + + return 0; + +err_free_gpio: + gpio_free(gpio); + + return ret; +} + +static int rb532_pata_driver_remove(struct platform_device *pdev) +{ + struct ata_host *ah = platform_get_drvdata(pdev); + struct rb532_cf_info *info = ah->private_data; + + ata_host_detach(ah); + gpio_free(info->gpio_line); + + return 0; +} + +static struct platform_driver rb532_pata_platform_driver = { + .probe = rb532_pata_driver_probe, + .remove = rb532_pata_driver_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +#define DRV_INFO DRV_DESC " version " DRV_VERSION + +module_platform_driver(rb532_pata_platform_driver); + +MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c new file mode 100644 index 00000000000..9ce5952216b --- /dev/null +++ b/drivers/ata/pata_rdc.c @@ -0,0 +1,398 @@ +/* + * pata_rdc - Driver for later RDC PATA controllers + * + * This is actually a driver for hardware meeting + * INCITS 370-2004 (1510D): ATA Host Adapter Standards + * + * Based on ata_piix. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_rdc"
+#define DRV_VERSION	"0.01"
+
+struct rdc_host_priv {
+	u32 saved_iocfg;
+};
+
+/**
+ * rdc_pata_cable_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from ATA PCI device's PCI config
+ * register.  This register is normally set by firmware (BIOS).
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static int rdc_pata_cable_detect(struct ata_port *ap)
+{
+	struct rdc_host_priv *hpriv = ap->host->private_data;
+	u8 mask;
+
+	/* check BIOS cable detect results */
+	mask = 0x30 << (2 * ap->port_no);
+	if ((hpriv->saved_iocfg & mask) == 0)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ * rdc_pata_prereset - prereset for PATA host controller
+ * @link: Target link
+ * @deadline: deadline jiffies for the operation
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	static const struct pci_bits rdc_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+
+	if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_sff_prereset(link, deadline);
+}
+
+static DEFINE_SPINLOCK(rdc_lock);
+
+/**
+ * rdc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	unsigned int is_slave = (adev->devno != 0);
+	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+	unsigned int slave_port = 0x44;
+	u16 master_data;
+	u8 slave_data;
+	u8 udma_enable;
+	int control = 0;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio >= 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE enable */
+
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+
+	spin_lock_irqsave(&rdc_lock, flags);
+
+	/* PIO configuration clears DTE unconditionally.  It will be
+	 * programmed in set_dmamode which is guaranteed to be called
+	 * after set_piomode if any DMA mode is available.
+	 */
+	pci_read_config_word(dev, master_port, &master_data);
+	if (is_slave) {
+		/* clear TIME1|IE1|PPE1|DTE1 */
+		master_data &= 0xff0f;
+		/* Enable SITRE (separate slave timing register) */
+		master_data |= 0x4000;
+		/* enable PPE1, IE1 and TIME1 as needed */
+		master_data |= (control << 4);
+		pci_read_config_byte(dev, slave_port, &slave_data);
+		slave_data &= (ap->port_no ? 0x0f : 0xf0);
+		/* Load the timing nibble for this slave */
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+				<< (ap->port_no ? 4 : 0);
+	} else {
+		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+		master_data &= 0xccf0;
+		/* Enable PPE, IE and TIME as appropriate */
+		master_data |= control;
+		/* load ISP and RCT */
+		master_data |=
+			(timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	}
+	pci_write_config_word(dev, master_port, master_data);
+	if (is_slave)
+		pci_write_config_byte(dev, slave_port, slave_data);
+
+	/* Ensure the UDMA bit is off - it will be turned back on if
+	   UDMA is selected */
+
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&rdc_lock, flags);
+}
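The master-timing arithmetic above, in one worked instance. Assumptions for this throwaway userspace sketch: PIO4 on the master device (so ISP=2, RCT=3 from the timings[] table), an ATA disk that needs IORDY (control = TIME1|IE|PPE = 0x7), and an arbitrary pre-existing register value of 0xffff:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint16_t master_data = 0xffff;	/* arbitrary pre-existing value */
    	const unsigned isp = 2, rct = 3, control = 0x7;

    	master_data &= 0xccf0;		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
    	master_data |= control;		/* TIME1|IE|PPE in the low nibble */
    	master_data |= (isp << 12) | (rct << 8);

    	printf("master_data = 0x%04x\n", master_data);	/* prints 0xeff7 */
    	return 0;
    }

For the slave device the same control and timing nibbles are shifted into the high nibble of master_data and into the shared slave timing byte at config offset 0x44, as the code above shows.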
+/**
+ * rdc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	u8 master_port		= ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	u8 udma_enable		= 0;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	spin_lock_irqsave(&rdc_lock, flags);
+
+	pci_read_config_word(dev, master_port, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		/*
+		 * UDMA is handled by a combination of clock switching and
+		 * selection of dividers
+		 *
+		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
+		 * except UDMA0 which is 00
+		 */
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma == 5)
+			u_clock = 0x1000;	/* 100 MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66 MHz */
+		else
+			u_clock = 0;		/* 33 MHz */
+
+		udma_enable |= (1 << devid);
+
+		/* Load the CT/RP selection */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= u_speed << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		/* Select a 33/66/100 MHz clock */
+		pci_read_config_word(dev, 0x54, &ideconf);
+		ideconf &= ~(0x1001 << devid);
+		ideconf |= u_clock << devid;
+		pci_write_config_word(dev, 0x54, ideconf);
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings.  We must also enable
+		 * IORDY unconditionally along with TIME1.  PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (adev->devno) {	/* Slave */
+			master_data &= 0xFF4F;	/* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= (ap->port_no ? 0x0f : 0xf0);
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+					<< (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else {	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, master_port, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&rdc_lock, flags);
+}
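The "handy rule" in the UDMA branch above is easy to misread, so here is the same mode-to-field mapping tabulated by a throwaway userspace program. Only the min() expression and the clock thresholds come from the code above; everything else is scaffolding:

    #include <stdio.h>

    #define MIN(a, b)	((a) < (b) ? (a) : (b))

    int main(void)
    {
    	for (int udma = 0; udma <= 5; udma++) {
    		/* odd modes -> 01, even -> 02, except UDMA0 -> 00 */
    		int u_speed = MIN(2 - (udma & 1), udma);
    		/* UDMA5 uses the 100 MHz clock, UDMA3/4 the 66 MHz one */
    		int mhz = (udma == 5) ? 100 : (udma > 2) ? 66 : 33;
    		printf("UDMA%d: UDMATIMx=%02x, clock=%d MHz\n",
    		       udma, u_speed, mhz);
    	}
    	return 0;	/* fields 00,01,02,01,02,01; clocks 33,33,33,66,66,100 */
    }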
+static struct ata_port_operations rdc_pata_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.cable_detect		= rdc_pata_cable_detect,
+	.set_piomode		= rdc_set_piomode,
+	.set_dmamode		= rdc_set_dmamode,
+	.prereset		= rdc_pata_prereset,
+};
+
+static struct ata_port_info rdc_port_info = {
+
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA12_ONLY,
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &rdc_pata_ops,
+};
+
+static struct scsi_host_template rdc_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/**
+ * rdc_init_one - Register RDC ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in rdc_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.  We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_port_info port_info[2];
+	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+	struct ata_host *host;
+	struct rdc_host_priv *hpriv;
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	port_info[0] = rdc_port_info;
+	port_info[1] = rdc_port_info;
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	/* Save IOCFG, this will be used for cable detection, quirk
+	 * detection and restoration on detach.
+ */ + pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); + + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + return rc; + host->private_data = hpriv; + + pci_intx(pdev, 1); + + host->flags |= ATA_HOST_PARALLEL_SCAN; + + pci_set_master(pdev); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht); +} + +static void rdc_remove_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + struct rdc_host_priv *hpriv = host->private_data; + + pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg); + + ata_pci_remove_one(pdev); +} + +static const struct pci_device_id rdc_pci_tbl[] = { + { PCI_DEVICE(0x17F3, 0x1011), }, + { PCI_DEVICE(0x17F3, 0x1012), }, + { } /* terminate list */ +}; + +static struct pci_driver rdc_pci_driver = { + .name = DRV_NAME, + .id_table = rdc_pci_tbl, + .probe = rdc_init_one, + .remove = rdc_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + + +module_pci_driver(rdc_pci_driver); + +MODULE_AUTHOR("Alan Cox (based on ata_piix)"); +MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, rdc_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index eccc6fd4503..b3ec18c6bcc 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c @@ -14,120 +14,64 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_rz1000" -#define DRV_VERSION "0.2.2" +#define DRV_VERSION "0.2.4" /** - * rz1000_prereset - probe begin - * @ap: ATA port - * - * Set up cable type and use generics - */ - -static int rz1000_prereset(struct ata_port *ap) -{ - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - -/** - * rz1000_error_handler - probe reset - * @ap: ATA port - * - * Perform the ATA standard reset sequence - */ - -static void rz1000_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, rz1000_prereset, ata_std_softreset, NULL, ata_std_postreset); -} - -/** * rz1000_set_mode - mode setting function - * @ap: ATA interface + * @link: ATA link + * @unused: returned device on set_mode failure * * Use a non standard set_mode function. We don't want to be tuned. We * would prefer to be BIOS generic but for the fact our hardware is * whacked out. 
*/ -static void rz1000_set_mode(struct ata_port *ap) +static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused) { - int i; - - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev)) { - /* We don't really care */ - dev->pio_mode = XFER_PIO_0; - dev->xfer_mode = XFER_PIO_0; - dev->xfer_shift = ATA_SHIFT_PIO; - dev->flags |= ATA_DFLAG_PIO; - } + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + /* We don't really care */ + dev->pio_mode = XFER_PIO_0; + dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + ata_dev_info(dev, "configured for PIO\n"); } + return 0; } static struct scsi_host_template rz1000_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations rz1000_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, .set_mode = rz1000_set_mode, - - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = rz1000_error_handler, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = rz1000_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; +static int rz1000_fifo_disable(struct pci_dev *pdev) +{ + u16 reg; + /* Be exceptionally paranoid as we must be sure to apply the fix */ + if (pci_read_config_word(pdev, 0x40, ®) != 0) + return -1; + reg &= 0xDFFF; + if (pci_write_config_word(pdev, 0x40, reg) != 0) + return -1; + printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n"); + return 0; +} + /** * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services * @pdev: PCI device to register @@ -140,66 +84,65 @@ static struct ata_port_operations rz1000_port_ops = { static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_port_info *port_info[2]; - u16 reg; - static struct ata_port_info info = { - .sht = &rz1000_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, .port_ops = &rz1000_port_ops }; + const struct ata_port_info *ppi[] = { &info, NULL }; - if (!printed_version++) - printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - /* Be exceptionally paranoid as we must be sure to apply the fix */ - if (pci_read_config_word(pdev, 0x40, ®) != 0) - goto fail; - reg &= 0xDFFF; - if 
(pci_write_config_word(pdev, 0x40, reg) != 0) - goto fail; - printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n"); + if (rz1000_fifo_disable(pdev) == 0) + return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0); - port_info[0] = &info; - port_info[1] = &info; - return ata_pci_init_one(pdev, port_info, 2); -fail: printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); /* Not safe to use so skip */ return -ENODEV; } -static struct pci_device_id pata_rz1000[] = { - { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, - { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), }, - { 0, }, +#ifdef CONFIG_PM_SLEEP +static int rz1000_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + /* If this fails on resume (which is a "can't happen" case), we + must stop as any progress risks data loss */ + if (rz1000_fifo_disable(pdev)) + panic("rz1000 fifo"); + + ata_host_resume(host); + return 0; +} +#endif + +static const struct pci_device_id pata_rz1000[] = { + { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, + { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), }, + + { }, }; static struct pci_driver rz1000_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = pata_rz1000, .probe = rz1000_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = rz1000_reinit_one, +#endif }; - -static int __init rz1000_init(void) -{ - return pci_register_driver(&rz1000_pci_driver); -} - -static void __exit rz1000_exit(void) -{ - pci_unregister_driver(&rz1000_pci_driver); -} +module_pci_driver(rz1000_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pata_rz1000); MODULE_VERSION(DRV_VERSION); - -module_init(rz1000_init); -module_exit(rz1000_exit); - diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c new file mode 100644 index 00000000000..fb528831fb9 --- /dev/null +++ b/drivers/ata/pata_samsung_cf.c @@ -0,0 +1,684 @@ +/* + * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * PATA driver for Samsung SoCs. + * Supports CF Interface in True IDE mode. Currently only PIO mode has been + * implemented; UDMA support has to be added. + * + * Based on: + * PATA driver for AT91SAM9260 Static Memory Controller + * PATA driver for Toshiba SCC controller + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/ata-samsung_cf.h>
+
+#define DRV_NAME "pata_samsung_cf"
+#define DRV_VERSION "0.1"
+
+#define S3C_CFATA_REG(x)	(x)
+#define S3C_CFATA_MUX		S3C_CFATA_REG(0x0)
+#define S3C_ATA_CTRL		S3C_CFATA_REG(0x0)
+#define S3C_ATA_CMD		S3C_CFATA_REG(0x8)
+#define S3C_ATA_IRQ		S3C_CFATA_REG(0x10)
+#define S3C_ATA_IRQ_MSK		S3C_CFATA_REG(0x14)
+#define S3C_ATA_CFG		S3C_CFATA_REG(0x18)
+
+#define S3C_ATA_PIO_TIME	S3C_CFATA_REG(0x2c)
+#define S3C_ATA_PIO_DTR		S3C_CFATA_REG(0x54)
+#define S3C_ATA_PIO_FED		S3C_CFATA_REG(0x58)
+#define S3C_ATA_PIO_SCR		S3C_CFATA_REG(0x5c)
+#define S3C_ATA_PIO_LLR		S3C_CFATA_REG(0x60)
+#define S3C_ATA_PIO_LMR		S3C_CFATA_REG(0x64)
+#define S3C_ATA_PIO_LHR		S3C_CFATA_REG(0x68)
+#define S3C_ATA_PIO_DVR		S3C_CFATA_REG(0x6c)
+#define S3C_ATA_PIO_CSD		S3C_CFATA_REG(0x70)
+#define S3C_ATA_PIO_DAD		S3C_CFATA_REG(0x74)
+#define S3C_ATA_PIO_RDATA	S3C_CFATA_REG(0x7c)
+
+#define S3C_CFATA_MUX_TRUEIDE	0x01
+#define S3C_ATA_CFG_SWAP	0x40
+#define S3C_ATA_CFG_IORDYEN	0x02
+
+enum s3c_cpu_type {
+	TYPE_S3C64XX,
+	TYPE_S5PC100,
+	TYPE_S5PV210,
+};
+
+/**
+ * struct s3c_ide_info - S3C PATA instance.
+ * @clk: The clock resource for this controller.
+ * @ide_addr: The area mapped for the hardware registers.
+ * @sfr_addr: The area mapped for the special function registers.
+ * @irq: The IRQ number we are using.
+ * @cpu_type: The exact type of this controller.
+ * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
+ */
+struct s3c_ide_info {
+	struct clk *clk;
+	void __iomem *ide_addr;
+	void __iomem *sfr_addr;
+	unsigned int irq;
+	enum s3c_cpu_type cpu_type;
+	unsigned int fifo_status_reg;
+};
+
+static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
+{
+	u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
+	reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
+	writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
+}
+
+static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
+{
+	/* Select true-ide as the internal operating mode */
+	writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
+		s3c_ide_sfrbase + S3C_CFATA_MUX);
+}
+
+static unsigned long
+pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
+{
+	int t1 = ata->setup;
+	int t2 = ata->act8b;
+	int t2i = ata->rec8b;
+	ulong piotime;
+
+	piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
+
+	return piotime;
+}
+
+static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct s3c_ide_info *info = ap->host->private_data;
+	struct ata_timing timing;
+	int cycle_time;
+	ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
+	ulong piotime;
+
+	/* Enables IORDY if mode requires it */
+	if (ata_pio_need_iordy(adev))
+		ata_cfg |= S3C_ATA_CFG_IORDYEN;
+	else
+		ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
+
+	cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
+
+	ata_timing_compute(adev, adev->pio_mode, &timing,
+					cycle_time * 1000, 0);
+
+	piotime = pata_s3c_setup_timing(info, &timing);
+
+	writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
+	writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
+}
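One worked instance of the timing word packed above, with illustrative cycle counts (not taken from any datasheet): t1 = 2 (address setup), t2 = 5 (8-bit access) and t2i = 8 (8-bit recovery) yield

    piotime = (8 << 12) | (5 << 4) | 2 = 0x8052

so the recovery count lands in bits 19..12, the access count in bits 11..4 and the setup count in the low nibble of S3C_ATA_PIO_TIME.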
+/*
+ * Waits until the IDE controller is able to perform next read/write
+ * operation to the disk. Needed for 64XX series boards only.
+ */
+static int wait_for_host_ready(struct s3c_ide_info *info)
+{
+	ulong timeout;
+	void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
+
+	/* wait for maximum of 20 msec */
+	timeout = jiffies + msecs_to_jiffies(20);
+	while (time_before(jiffies, timeout)) {
+		if ((readl(fifo_reg) >> 28) == 0)
+			return 0;
+	}
+	return -EBUSY;
+}
+
+/*
+ * Writes to one of the task file registers.
+ */
+static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
+{
+	struct s3c_ide_info *info = host->private_data;
+
+	wait_for_host_ready(info);
+	writeb(addr, reg);
+}
+
+/*
+ * Reads from one of the task file registers.
+ */
+static u8 ata_inb(struct ata_host *host, void __iomem *reg)
+{
+	struct s3c_ide_info *info = host->private_data;
+	u8 temp;
+
+	wait_for_host_ready(info);
+	/* The first read only triggers the bus cycle; the value coming back
+	 * from the device is latched into the PIO read data register and
+	 * must be fetched from there once the controller is ready again. */
+	(void) readb(reg);
+	wait_for_host_ready(info);
+	temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
+	return temp;
+}
+
+/*
+ * pata_s3c_tf_load - send taskfile registers to host controller
+ */
+static void pata_s3c_tf_load(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
+		ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
+		ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
+		ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
+		ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
+	}
+
+	if (is_addr) {
+		ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
+		ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
+		ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
+		ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
+		ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		ata_outb(ap->host, tf->device, ioaddr->device_addr);
+
+	ata_wait_idle(ap);
+}
+
+/*
+ * pata_s3c_tf_read - input device's ATA taskfile shadow registers
+ */
+static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->feature = ata_inb(ap->host, ioaddr->error_addr);
+	tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+	tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+	tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+	tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+	tf->device = ata_inb(ap->host, ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
+		tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+		tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+		tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+		tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/*
+ * pata_s3c_exec_command - issue ATA command to host controller
+ */
+static void pata_s3c_exec_command(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
+	ata_sff_pause(ap);
+}
+
+/*
+ * pata_s3c_check_status - Read device status register
+ */
+static u8 pata_s3c_check_status(struct ata_port *ap)
+{
+	return ata_inb(ap->host, ap->ioaddr.status_addr);
+}
+
+/*
+ * pata_s3c_check_altstatus - Read alternate device status register
+ */
+static u8 pata_s3c_check_altstatus(struct ata_port *ap) +{ + return ata_inb(ap->host, ap->ioaddr.altstatus_addr); +} + +/* + * pata_s3c_data_xfer - Transfer data by PIO + */ +static unsigned int pata_s3c_data_xfer(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + struct s3c_ide_info *info = ap->host->private_data; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 1, i; + u16 *data_ptr = (u16 *)buf; + + /* Requires wait same as in ata_inb/ata_outb */ + if (rw == READ) + for (i = 0; i < words; i++, data_ptr++) { + wait_for_host_ready(info); + (void) readw(data_addr); + wait_for_host_ready(info); + *data_ptr = readw(info->ide_addr + + S3C_ATA_PIO_RDATA); + } + else + for (i = 0; i < words; i++, data_ptr++) { + wait_for_host_ready(info); + writew(*data_ptr, data_addr); + } + + if (buflen & 0x01) + dev_err(ap->dev, "unexpected trailing data\n"); + + return words << 1; +} + +/* + * pata_s3c_dev_select - Select device on ATA bus + */ +static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device) +{ + u8 tmp = ATA_DEVICE_OBS; + + if (device != 0) + tmp |= ATA_DEV1; + + ata_outb(ap->host, tmp, ap->ioaddr.device_addr); + ata_sff_pause(ap); +} + +/* + * pata_s3c_devchk - PATA device presence detection + */ +static unsigned int pata_s3c_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + pata_s3c_dev_select(ap, device); + + ata_outb(ap->host, 0x55, ioaddr->nsect_addr); + ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); + + ata_outb(ap->host, 0xaa, ioaddr->nsect_addr); + ata_outb(ap->host, 0x55, ioaddr->lbal_addr); + + ata_outb(ap->host, 0x55, ioaddr->nsect_addr); + ata_outb(ap->host, 0xaa, ioaddr->lbal_addr); + + nsect = ata_inb(ap->host, ioaddr->nsect_addr); + lbal = ata_inb(ap->host, ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/* + * pata_s3c_wait_after_reset - wait for devices to become ready after reset + */ +static int pata_s3c_wait_after_reset(struct ata_link *link, + unsigned long deadline) +{ + int rc; + + ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + /* -ENODEV means the odd clown forgot the D7 pulldown resistor + * and TF status is 0xff, bail out on it too. + */ + if (rc) + return rc; + + return 0; +} + +/* + * pata_s3c_bus_softreset - PATA device software reset + */ +static unsigned int pata_s3c_bus_softreset(struct ata_port *ap, + unsigned long deadline) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + /* software reset. 
causes dev0 to be selected */ + ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr); + udelay(20); + ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); + ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + + return pata_s3c_wait_after_reset(&ap->link, deadline); +} + +/* + * pata_s3c_softreset - reset host port via ATA SRST + */ +static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + unsigned int devmask = 0; + int rc; + u8 err; + + /* determine if device 0 is present */ + if (pata_s3c_devchk(ap, 0)) + devmask |= (1 << 0); + + /* select device 0 again */ + pata_s3c_dev_select(ap, 0); + + /* issue bus reset */ + rc = pata_s3c_bus_softreset(ap, deadline); + /* if link is occupied, -ENODEV too is an error */ + if (rc && rc != -ENODEV) { + ata_link_err(link, "SRST failed (errno=%d)\n", rc); + return rc; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&ap->link.device[0], + devmask & (1 << 0), &err); + + return 0; +} + +/* + * pata_s3c_set_devctl - Write device control register + */ +static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl) +{ + ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr); +} + +static struct scsi_host_template pata_s3c_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations pata_s3c_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_check_status = pata_s3c_check_status, + .sff_check_altstatus = pata_s3c_check_altstatus, + .sff_tf_load = pata_s3c_tf_load, + .sff_tf_read = pata_s3c_tf_read, + .sff_data_xfer = pata_s3c_data_xfer, + .sff_exec_command = pata_s3c_exec_command, + .sff_dev_select = pata_s3c_dev_select, + .sff_set_devctl = pata_s3c_set_devctl, + .softreset = pata_s3c_softreset, + .set_piomode = pata_s3c_set_piomode, +}; + +static struct ata_port_operations pata_s5p_port_ops = { + .inherits = &ata_sff_port_ops, + .set_piomode = pata_s3c_set_piomode, +}; + +static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state) +{ + u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL); + temp = state ? 
(temp | 1) : (temp & ~1);
+ writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
+}
+
+static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct s3c_ide_info *info = host->private_data;
+ u32 reg;
+
+ reg = readl(info->ide_addr + S3C_ATA_IRQ);
+ writel(reg, info->ide_addr + S3C_ATA_IRQ);
+
+ return ata_sff_interrupt(irq, dev_instance);
+}
+
+static void pata_s3c_hwinit(struct s3c_ide_info *info,
+ struct s3c_ide_platdata *pdata)
+{
+ switch (info->cpu_type) {
+ case TYPE_S3C64XX:
+ /* Configure as big endian */
+ pata_s3c_cfg_mode(info->sfr_addr);
+ pata_s3c_set_endian(info->ide_addr, 1);
+ pata_s3c_enable(info->ide_addr, true);
+ msleep(100);
+
+ /* Clear pending IRQ status */
+ writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
+ writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
+ break;
+
+ case TYPE_S5PC100:
+ pata_s3c_cfg_mode(info->sfr_addr);
+ /* FALLTHROUGH */
+
+ case TYPE_S5PV210:
+ /* Configure as little endian */
+ pata_s3c_set_endian(info->ide_addr, 0);
+ pata_s3c_enable(info->ide_addr, true);
+ msleep(100);
+
+ /* Clear pending IRQ status */
+ writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
+ writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static int __init pata_s3c_probe(struct platform_device *pdev)
+{
+ struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct s3c_ide_info *info;
+ struct resource *res;
+ struct ata_port *ap;
+ struct ata_host *host;
+ enum s3c_cpu_type cpu_type;
+ int ret;
+
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "failed to allocate memory for device data\n");
+ return -ENOMEM;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ info->ide_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->ide_addr))
+ return PTR_ERR(info->ide_addr);
+
+ info->clk = devm_clk_get(&pdev->dev, "cfcon");
+ if (IS_ERR(info->clk)) {
+ dev_err(dev, "failed to get access to cf controller clock\n");
+ ret = PTR_ERR(info->clk);
+ info->clk = NULL;
+ return ret;
+ }
+
+ clk_enable(info->clk);
+
+ /* init ata host */
+ host = ata_host_alloc(dev, 1);
+ if (!host) {
+ dev_err(dev, "failed to allocate ide host\n");
+ ret = -ENOMEM;
+ goto stop_clk;
+ }
+
+ ap = host->ports[0];
+ ap->pio_mask = ATA_PIO4;
+
+ if (cpu_type == TYPE_S3C64XX) {
+ ap->ops = &pata_s3c_port_ops;
+ info->sfr_addr = info->ide_addr + 0x1800;
+ info->ide_addr += 0x1900;
+ info->fifo_status_reg = 0x94;
+ } else if (cpu_type == TYPE_S5PC100) {
+ ap->ops = &pata_s5p_port_ops;
+ info->sfr_addr = info->ide_addr + 0x1800;
+ info->ide_addr += 0x1900;
+ info->fifo_status_reg = 0x84;
+ } else {
+ ap->ops = &pata_s5p_port_ops;
+ info->fifo_status_reg = 0x84;
+ }
+
+ info->cpu_type = cpu_type;
+
+ if (info->irq <= 0) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ info->irq = 0;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
+ ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
+ ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
+ ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
+ ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
+ ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
+ ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
+ ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
+ ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
+
ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD; + ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD; + ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD; + ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD; + + ata_port_desc(ap, "mmio cmd 0x%llx ", + (unsigned long long)res->start); + + host->private_data = info; + + if (pdata && pdata->setup_gpio) + pdata->setup_gpio(); + + /* Set endianness and enable the interface */ + pata_s3c_hwinit(info, pdata); + + platform_set_drvdata(pdev, host); + + ret = ata_host_activate(host, info->irq, + info->irq ? pata_s3c_irq : NULL, + 0, &pata_s3c_sht); + if (ret) + goto stop_clk; + + return 0; + +stop_clk: + clk_disable(info->clk); + return ret; +} + +static int __exit pata_s3c_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct s3c_ide_info *info = host->private_data; + + ata_host_detach(host); + + clk_disable(info->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int pata_s3c_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ata_host *host = platform_get_drvdata(pdev); + + return ata_host_suspend(host, PMSG_SUSPEND); +} + +static int pata_s3c_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ata_host *host = platform_get_drvdata(pdev); + struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev); + struct s3c_ide_info *info = host->private_data; + + pata_s3c_hwinit(info, pdata); + ata_host_resume(host); + + return 0; +} + +static const struct dev_pm_ops pata_s3c_pm_ops = { + .suspend = pata_s3c_suspend, + .resume = pata_s3c_resume, +}; +#endif + +/* driver device registration */ +static struct platform_device_id pata_s3c_driver_ids[] = { + { + .name = "s3c64xx-pata", + .driver_data = TYPE_S3C64XX, + }, { + .name = "s5pc100-pata", + .driver_data = TYPE_S5PC100, + }, { + .name = "s5pv210-pata", + .driver_data = TYPE_S5PV210, + }, + { } +}; + +MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids); + +static struct platform_driver pata_s3c_driver = { + .remove = __exit_p(pata_s3c_remove), + .id_table = pata_s3c_driver_ids, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, +#ifdef CONFIG_PM_SLEEP + .pm = &pata_s3c_pm_ops, +#endif + }, +}; + +module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe); + +MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); +MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 107e6cd3dc0..c71de5d680d 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -1,8 +1,7 @@ /* - * New ATA layer SC1200 driver Alan Cox <alan@redhat.com> + * New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk> * * TODO: Mode selection filtering - * TODO: Can't enable second channel until ATA core has serialize * TODO: Needs custom DMA cleanup code * * Based very heavily on @@ -33,14 +32,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> -#define DRV_NAME "sc1200" -#define DRV_VERSION "0.2.3" +#define DRV_NAME "pata_sc1200" +#define DRV_VERSION "0.2.6" #define SC1200_REV_A 0x00 #define SC1200_REV_B1 0x01 @@ -87,10 +85,14 @@ static int sc1200_clock(void) static void sc1200_set_piomode(struct ata_port *ap, struct ata_device 
*adev)
{
 static const u32 pio_timings[4][5] = {
- {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
- {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
- {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} // format1, 66Mhz
+ /* format0, 33MHz */
+ { 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
+ /* format1, 33MHz */
+ { 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
+ /* format1, 48MHz */
+ { 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
+ /* format1, 66MHz */
+ { 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
 };
 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -151,81 +153,71 @@ static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- * sc1200_qc_issue_prot - command issue
+ * sc1200_qc_issue - command issue
 * @qc: command pending
 *
 * Called when the libata layer is about to issue a command. We wrap
 * this interface so that we can load the correct ATA timings if
- * neccessary. Specifically we have a problem that there is only
+ * necessary. Specifically we have a problem that there is only
 * one MWDMA/UDMA bit.
 */
 
-static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
+static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
 {
 struct ata_port *ap = qc->ap;
 struct ata_device *adev = qc->dev;
 struct ata_device *prev = ap->private_data;
 
 /* See if the DMA settings could be wrong */
- if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
 /* Maybe, but do the channels match MWDMA/UDMA ? */
- if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
- (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+ if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+ (ata_using_udma(prev) && !ata_using_udma(adev)))
 /* Switch the mode bits */
 sc1200_set_dmamode(ap, adev);
 }
 
- return ata_qc_issue_prot(qc);
+ return ata_bmdma_qc_issue(qc);
+}
+
+/**
+ * sc1200_qc_defer - implement serialization
+ * @qc: command
+ *
+ * Serialize command issue on this controller.
+ */
+
+static int sc1200_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_host *host = qc->ap->host;
+ struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+ int rc;
+
+ /* First apply the usual rules */
+ rc = ata_std_qc_defer(qc);
+ if (rc != 0)
+ return rc;
+
+ /* Now apply serialization rules.
Only allow a command if the + other channel state machine is idle */ + if (alt && alt->qc_active) + return ATA_DEFER_PORT; + return 0; } static struct scsi_host_template sc1200_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), + .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations sc1200_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .qc_prep = ata_bmdma_dumb_qc_prep, + .qc_issue = sc1200_qc_issue, + .qc_defer = sc1200_qc_defer, + .cable_detect = ata_cable_40wire, .set_piomode = sc1200_set_piomode, .set_dmamode = sc1200_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .error_handler = ata_bmdma_error_handler, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = sc1200_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; /** @@ -239,49 +231,39 @@ static struct ata_port_operations sc1200_port_ops = { static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &sc1200_sht, - .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &sc1200_port_ops }; - static struct ata_port_info *port_info[2] = { &info, &info }; + const struct ata_port_info *ppi[] = { &info, NULL }; - /* Can't enable port 2 yet, see top comments */ - return ata_pci_init_one(dev, port_info, 1); + return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); } -static struct pci_device_id sc1200[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), }, - { 0, }, +static const struct pci_device_id sc1200[] = { + { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), }, + + { }, }; static struct pci_driver sc1200_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = sc1200, .probe = sc1200_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; -static int __init sc1200_init(void) -{ - return pci_register_driver(&sc1200_pci_driver); -} - - -static void __exit sc1200_exit(void) -{ - pci_unregister_driver(&sc1200_pci_driver); -} - +module_pci_driver(sc1200_pci_driver); MODULE_AUTHOR("Alan Cox, Mark Lord"); MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sc1200); MODULE_VERSION(DRV_VERSION); - -module_init(sc1200_init); -module_exit(sc1200_exit); diff --git 
a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c new file mode 100644 index 00000000000..4e006d74bef --- /dev/null +++ b/drivers/ata/pata_scc.c @@ -0,0 +1,1111 @@ +/* + * Support for IDE interfaces on Celleb platform + * + * (C) Copyright 2006 TOSHIBA CORPORATION + * + * This code is based on drivers/ata/ata_piix.c: + * Copyright 2003-2005 Red Hat Inc + * Copyright 2003-2005 Jeff Garzik + * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer + * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> + * Copyright (C) 2003 Red Hat Inc + * + * and drivers/ata/ahci.c: + * Copyright 2004-2005 Red Hat, Inc. + * + * and drivers/ata/libata-core.c: + * Copyright 2003-2004 Red Hat, Inc. All rights reserved. + * Copyright 2003-2004 Jeff Garzik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> + +#define DRV_NAME "pata_scc" +#define DRV_VERSION "0.3" + +#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 + +/* PCI BARs */ +#define SCC_CTRL_BAR 0 +#define SCC_BMID_BAR 1 + +/* offset of CTRL registers */ +#define SCC_CTL_PIOSHT 0x000 +#define SCC_CTL_PIOCT 0x004 +#define SCC_CTL_MDMACT 0x008 +#define SCC_CTL_MCRCST 0x00C +#define SCC_CTL_SDMACT 0x010 +#define SCC_CTL_SCRCST 0x014 +#define SCC_CTL_UDENVT 0x018 +#define SCC_CTL_TDVHSEL 0x020 +#define SCC_CTL_MODEREG 0x024 +#define SCC_CTL_ECMODE 0xF00 +#define SCC_CTL_MAEA0 0xF50 +#define SCC_CTL_MAEC0 0xF54 +#define SCC_CTL_CCKCTRL 0xFF0 + +/* offset of BMID registers */ +#define SCC_DMA_CMD 0x000 +#define SCC_DMA_STATUS 0x004 +#define SCC_DMA_TABLE_OFS 0x008 +#define SCC_DMA_INTMASK 0x010 +#define SCC_DMA_INTST 0x014 +#define SCC_DMA_PTERADD 0x018 +#define SCC_REG_CMD_ADDR 0x020 +#define SCC_REG_DATA 0x000 +#define SCC_REG_ERR 0x004 +#define SCC_REG_FEATURE 0x004 +#define SCC_REG_NSECT 0x008 +#define SCC_REG_LBAL 0x00C +#define SCC_REG_LBAM 0x010 +#define SCC_REG_LBAH 0x014 +#define SCC_REG_DEVICE 0x018 +#define SCC_REG_STATUS 0x01C +#define SCC_REG_CMD 0x01C +#define SCC_REG_ALTSTATUS 0x020 + +/* register value */ +#define TDVHSEL_MASTER 0x00000001 +#define TDVHSEL_SLAVE 0x00000004 + +#define MODE_JCUSFEN 0x00000080 + +#define ECMODE_VALUE 0x01 + +#define CCKCTRL_ATARESET 0x00040000 +#define CCKCTRL_BUFCNT 0x00020000 +#define CCKCTRL_CRST 0x00010000 +#define CCKCTRL_OCLKEN 0x00000100 +#define CCKCTRL_ATACLKOEN 0x00000002 +#define CCKCTRL_LCLKEN 0x00000001 + +#define QCHCD_IOS_SS 0x00000001 + +#define QCHSD_STPDIAG 0x00020000 + +#define INTMASK_MSK 0xD1000012 +#define INTSTS_SERROR 0x80000000 +#define INTSTS_PRERR 0x40000000 +#define INTSTS_RERR 0x10000000 +#define INTSTS_ICERR 0x01000000 +#define INTSTS_BMSINT 0x00000010 +#define INTSTS_BMHE 
0x00000008
+#define INTSTS_IOIRQS 0x00000004
+#define INTSTS_INTRQ 0x00000002
+#define INTSTS_ACTEINT 0x00000001
+
+
+/* PIO transfer mode table */
+/* JCHST */
+static const unsigned long JCHSTtbl[2][7] = {
+ {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHHT */
+static const unsigned long JCHHTtbl[2][7] = {
+ {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHCT */
+static const unsigned long JCHCTtbl[2][7] = {
+ {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
+ {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
+};
+
+/* DMA transfer mode table */
+/* JCHDCTM/JCHDCTS */
+static const unsigned long JCHDCTxtbl[2][7] = {
+ {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
+ {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
+};
+
+/* JCSTWTM/JCSTWTS */
+static const unsigned long JCSTWTxtbl[2][7] = {
+ {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
+ {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
};
+
+/* JCTSS */
+static const unsigned long JCTSStbl[2][7] = {
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
+};
+
+/* JCENVT */
+static const unsigned long JCENVTtbl[2][7] = {
+ {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
+};
+
+/* JCACTSELS/JCACTSELM */
+static const unsigned long JCACTSELtbl[2][7] = {
+ {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
+};
+
+static const struct pci_device_id scc_pci_tbl[] = {
+ { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
+ { } /* terminate list */
+};
+
+/**
+ * scc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device whose timings we are configuring
+ *
+ * Set PIO mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
+ void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
+ unsigned long reg;
+ int offset;
+
+ reg = in_be32(cckctrl_port);
+ if (reg & CCKCTRL_ATACLKOEN)
+ offset = 1; /* 133MHz */
+ else
+ offset = 0; /* 100MHz */
+
+ reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
+ out_be32(piosht_port, reg);
+ reg = JCHCTtbl[offset][pio];
+ out_be32(pioct_port, reg);
+}
+
+/**
+ * scc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device whose timings we are configuring
+ *
+ * Set UDMA mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
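+ *
+ * (Editor's note: only UDMA timings are programmed here, as the function
+ * returns early for anything below XFER_UDMA_0; master and slave devices
+ * use the separate MDMACT/MCRCST and SDMACT/SCRCST timing registers.)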
+ */ + +static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev) +{ + unsigned int udma = adev->dma_mode; + unsigned int is_slave = (adev->devno != 0); + u8 speed = udma; + void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; + void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; + void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT; + void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST; + void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT; + void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST; + void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT; + void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL; + int offset, idx; + + if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN) + offset = 1; /* 133MHz */ + else + offset = 0; /* 100MHz */ + + if (speed >= XFER_UDMA_0) + idx = speed - XFER_UDMA_0; + else + return; + + if (is_slave) { + out_be32(sdmact_port, JCHDCTxtbl[offset][idx]); + out_be32(scrcst_port, JCSTWTxtbl[offset][idx]); + out_be32(tdvhsel_port, + (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2)); + } else { + out_be32(mdmact_port, JCHDCTxtbl[offset][idx]); + out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]); + out_be32(tdvhsel_port, + (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]); + } + out_be32(udenvt_port, + JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]); +} + +unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) +{ + /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */ + if (adev->class == ATA_DEV_ATAPI && + (mask & (0xE0 << ATA_SHIFT_UDMA))) { + printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); + mask &= ~(0xE0 << ATA_SHIFT_UDMA); + } + return mask; +} + +/** + * scc_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Note: Original code is ata_sff_tf_load(). + */ + +static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + out_be32(ioaddr->ctl_addr, tf->ctl); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + out_be32(ioaddr->feature_addr, tf->hob_feature); + out_be32(ioaddr->nsect_addr, tf->hob_nsect); + out_be32(ioaddr->lbal_addr, tf->hob_lbal); + out_be32(ioaddr->lbam_addr, tf->hob_lbam); + out_be32(ioaddr->lbah_addr, tf->hob_lbah); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + out_be32(ioaddr->feature_addr, tf->feature); + out_be32(ioaddr->nsect_addr, tf->nsect); + out_be32(ioaddr->lbal_addr, tf->lbal); + out_be32(ioaddr->lbam_addr, tf->lbam); + out_be32(ioaddr->lbah_addr, tf->lbah); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + out_be32(ioaddr->device_addr, tf->device); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * scc_check_status - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Note: Original code is ata_check_status(). 
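+ *
+ * (Editor's note: the SCC maps its taskfile as 32-bit big-endian MMIO,
+ * hence the in_be32() read of a nominally 8-bit register below.)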
+ */ + +static u8 scc_check_status (struct ata_port *ap) +{ + return in_be32(ap->ioaddr.status_addr); +} + +/** + * scc_tf_read - input device's ATA taskfile shadow registers + * @ap: Port from which input is read + * @tf: ATA taskfile register set for storing input + * + * Note: Original code is ata_sff_tf_read(). + */ + +static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->command = scc_check_status(ap); + tf->feature = in_be32(ioaddr->error_addr); + tf->nsect = in_be32(ioaddr->nsect_addr); + tf->lbal = in_be32(ioaddr->lbal_addr); + tf->lbam = in_be32(ioaddr->lbam_addr); + tf->lbah = in_be32(ioaddr->lbah_addr); + tf->device = in_be32(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB); + tf->hob_feature = in_be32(ioaddr->error_addr); + tf->hob_nsect = in_be32(ioaddr->nsect_addr); + tf->hob_lbal = in_be32(ioaddr->lbal_addr); + tf->hob_lbam = in_be32(ioaddr->lbam_addr); + tf->hob_lbah = in_be32(ioaddr->lbah_addr); + out_be32(ioaddr->ctl_addr, tf->ctl); + ap->last_ctl = tf->ctl; + } +} + +/** + * scc_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Note: Original code is ata_sff_exec_command(). + */ + +static void scc_exec_command (struct ata_port *ap, + const struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); + + out_be32(ap->ioaddr.command_addr, tf->command); + ata_sff_pause(ap); +} + +/** + * scc_check_altstatus - Read device alternate status reg + * @ap: port where the device is + */ + +static u8 scc_check_altstatus (struct ata_port *ap) +{ + return in_be32(ap->ioaddr.altstatus_addr); +} + +/** + * scc_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * + * Note: Original code is ata_sff_dev_select(). + */ + +static void scc_dev_select (struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + out_be32(ap->ioaddr.device_addr, tmp); + ata_sff_pause(ap); +} + +/** + * scc_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + */ + +static void scc_set_devctl(struct ata_port *ap, u8 ctl) +{ + out_be32(ap->ioaddr.ctl_addr, ctl); +} + +/** + * scc_bmdma_setup - Set up PCI IDE BMDMA transaction + * @qc: Info associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_setup(). + */ + +static void scc_bmdma_setup (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u8 dmactl; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + /* load PRD table addr */ + out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma); + + /* specify data direction, triple-check start bit is clear */ + dmactl = in_be32(mmio + SCC_DMA_CMD); + dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); + if (!rw) + dmactl |= ATA_DMA_WR; + out_be32(mmio + SCC_DMA_CMD, dmactl); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +/** + * scc_bmdma_start - Start a PCI IDE BMDMA transaction + * @qc: Info associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_start(). 
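+ *
+ * (Editor's note: scc_bmdma_setup() has already loaded the PRD table
+ * address and the direction bit, so this step only sets ATA_DMA_START.)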
+ */ + +static void scc_bmdma_start (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + u8 dmactl; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + /* start host DMA transaction */ + dmactl = in_be32(mmio + SCC_DMA_CMD); + out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START); +} + +/** + * scc_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * Note: Original code is ata_devchk(). + */ + +static unsigned int scc_devchk (struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + ap->ops->sff_dev_select(ap, device); + + out_be32(ioaddr->nsect_addr, 0x55); + out_be32(ioaddr->lbal_addr, 0xaa); + + out_be32(ioaddr->nsect_addr, 0xaa); + out_be32(ioaddr->lbal_addr, 0x55); + + out_be32(ioaddr->nsect_addr, 0x55); + out_be32(ioaddr->lbal_addr, 0xaa); + + nsect = in_be32(ioaddr->nsect_addr); + lbal = in_be32(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * scc_wait_after_reset - wait for devices to become ready after reset + * + * Note: Original code is ata_sff_wait_after_reset + */ + +static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + int rc, ret = 0; + + /* Spec mandates ">= 2ms" before checking status. We wait + * 150ms, because that was the magic delay used for ATAPI + * devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. + * + * Old drivers/ide uses the 2mS rule and then waits for ready. + */ + ata_msleep(ap, 150); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + /* -ENODEV means the odd clown forgot the D7 pulldown resistor + * and TF status is 0xff, bail out on it too. + */ + if (rc) + return rc; + + /* if device 1 was found in ata_devchk, wait for register + * access briefly, then wait for BSY to clear. + */ + if (dev1) { + int i; + + ap->ops->sff_dev_select(ap, 1); + + /* Wait for register access. Some ATAPI devices fail + * to set nsect/lbal after reset, so don't waste too + * much time on it. We're gonna wait for !BSY anyway. + */ + for (i = 0; i < 2; i++) { + u8 nsect, lbal; + + nsect = in_be32(ioaddr->nsect_addr); + lbal = in_be32(ioaddr->lbal_addr); + if ((nsect == 1) && (lbal == 1)) + break; + ata_msleep(ap, 50); /* give drive a breather */ + } + + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + if (rc != -ENODEV) + return rc; + ret = rc; + } + } + + /* is all this really necessary? */ + ap->ops->sff_dev_select(ap, 0); + if (dev1) + ap->ops->sff_dev_select(ap, 1); + if (dev0) + ap->ops->sff_dev_select(ap, 0); + + return ret; +} + +/** + * scc_bus_softreset - PATA device software reset + * + * Note: Original code is ata_bus_softreset(). + */ + +static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, + unsigned long deadline) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); + + /* software reset. 
causes dev0 to be selected */ + out_be32(ioaddr->ctl_addr, ap->ctl); + udelay(20); + out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST); + udelay(20); + out_be32(ioaddr->ctl_addr, ap->ctl); + + scc_wait_after_reset(&ap->link, devmask, deadline); + + return 0; +} + +/** + * scc_softreset - reset host port via ATA SRST + * @ap: port to reset + * @classes: resulting classes of attached devices + * @deadline: deadline jiffies for the operation + * + * Note: Original code is ata_sff_softreset(). + */ + +static int scc_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + unsigned int devmask = 0, err_mask; + u8 err; + + DPRINTK("ENTER\n"); + + /* determine if device 0/1 are present */ + if (scc_devchk(ap, 0)) + devmask |= (1 << 0); + if (slave_possible && scc_devchk(ap, 1)) + devmask |= (1 << 1); + + /* select device 0 again */ + ap->ops->sff_dev_select(ap, 0); + + /* issue bus reset */ + DPRINTK("about to softreset, devmask=%x\n", devmask); + err_mask = scc_bus_softreset(ap, devmask, deadline); + if (err_mask) { + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); + return -EIO; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&ap->link.device[0], + devmask & (1 << 0), &err); + if (slave_possible && err != 0x81) + classes[1] = ata_sff_dev_classify(&ap->link.device[1], + devmask & (1 << 1), &err); + + DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); + return 0; +} + +/** + * scc_bmdma_stop - Stop PCI IDE BMDMA transfer + * @qc: Command we are ending DMA for + */ + +static void scc_bmdma_stop (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; + void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR]; + u32 reg; + + while (1) { + reg = in_be32(bmid_base + SCC_DMA_INTST); + + if (reg & INTSTS_SERROR) { + printk(KERN_WARNING "%s: SERROR\n", DRV_NAME); + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT); + out_be32(bmid_base + SCC_DMA_CMD, + in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); + continue; + } + + if (reg & INTSTS_PRERR) { + u32 maea0, maec0; + maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0); + maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0); + printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0); + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT); + out_be32(bmid_base + SCC_DMA_CMD, + in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); + continue; + } + + if (reg & INTSTS_RERR) { + printk(KERN_WARNING "%s: Response Error\n", DRV_NAME); + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT); + out_be32(bmid_base + SCC_DMA_CMD, + in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); + continue; + } + + if (reg & INTSTS_ICERR) { + out_be32(bmid_base + SCC_DMA_CMD, + in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); + printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME); + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT); + continue; + } + + if (reg & INTSTS_BMSINT) { + unsigned int classes; + unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); + printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); + /* TBD: SW reset */ + scc_softreset(&ap->link, &classes, deadline); + continue; + } + + if (reg & INTSTS_BMHE) { + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE); 
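+ /* (Editor's note: INTSTS_BMHE, presumably a bus-master hang/error
+ * condition, is simply acknowledged before the loop polls again.) */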
+ continue; + } + + if (reg & INTSTS_ACTEINT) { + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT); + continue; + } + + if (reg & INTSTS_IOIRQS) { + out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS); + continue; + } + break; + } + + /* clear start/stop bit */ + out_be32(bmid_base + SCC_DMA_CMD, + in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); /* dummy read */ +} + +/** + * scc_bmdma_status - Read PCI IDE BMDMA status + * @ap: Port associated with this ATA transaction. + */ + +static u8 scc_bmdma_status (struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); + u32 int_status = in_be32(mmio + SCC_DMA_INTST); + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); + static int retry = 0; + + /* return if IOS_SS is cleared */ + if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START)) + return host_stat; + + /* errata A252,A308 workaround: Step4 */ + if ((scc_check_altstatus(ap) & ATA_ERR) + && (int_status & INTSTS_INTRQ)) + return (host_stat | ATA_DMA_INTR); + + /* errata A308 workaround Step5 */ + if (int_status & INTSTS_IOIRQS) { + host_stat |= ATA_DMA_INTR; + + /* We don't check ATAPI DMA because it is limited to UDMA4 */ + if ((qc->tf.protocol == ATA_PROT_DMA && + qc->dev->xfer_mode > XFER_UDMA_4)) { + if (!(int_status & INTSTS_ACTEINT)) { + printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n", + ap->print_id); + host_stat |= ATA_DMA_ERR; + if (retry++) + ap->udma_mask &= ~(1 << qc->dev->xfer_mode); + } else + retry = 0; + } + } + + return host_stat; +} + +/** + * scc_data_xfer - Transfer data by PIO + * @dev: device for this I/O + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write + * + * Note: Original code is ata_sff_data_xfer(). + */ + +static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + unsigned int words = buflen >> 1; + unsigned int i; + __le16 *buf16 = (__le16 *) buf; + void __iomem *mmio = ap->ioaddr.data_addr; + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + for (i = 0; i < words; i++) + buf16[i] = cpu_to_le16(in_be32(mmio)); + else + for (i = 0; i < words; i++) + out_be32(mmio, le16_to_cpu(buf16[i])); + + /* Transfer trailing 1 byte, if any. */ + if (unlikely(buflen & 0x01)) { + __le16 align_buf[1] = { 0 }; + unsigned char *trailing_buf = buf + buflen - 1; + + if (rw == READ) { + align_buf[0] = cpu_to_le16(in_be32(mmio)); + memcpy(trailing_buf, align_buf, 1); + } else { + memcpy(align_buf, trailing_buf, 1); + out_be32(mmio, le16_to_cpu(align_buf[0])); + } + words++; + } + + return words << 1; +} + +/** + * scc_postreset - standard postreset callback + * @ap: the target ata_port + * @classes: classes of attached devices + * + * Note: Original code is ata_sff_postreset(). + */ + +static void scc_postreset(struct ata_link *link, unsigned int *classes) +{ + struct ata_port *ap = link->ap; + + DPRINTK("ENTER\n"); + + /* is double-select really necessary? 
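(editor's note: kept as-is from ata_sff_postreset(), on which this is based)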
*/ + if (classes[0] != ATA_DEV_NONE) + ap->ops->sff_dev_select(ap, 1); + if (classes[1] != ATA_DEV_NONE) + ap->ops->sff_dev_select(ap, 0); + + /* bail out if no device is present */ + if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { + DPRINTK("EXIT, no device\n"); + return; + } + + /* set up device control */ + out_be32(ap->ioaddr.ctl_addr, ap->ctl); + + DPRINTK("EXIT\n"); +} + +/** + * scc_irq_clear - Clear PCI IDE BMDMA interrupt. + * @ap: Port associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_irq_clear(). + */ + +static void scc_irq_clear (struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + if (!mmio) + return; + + out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS)); +} + +/** + * scc_port_start - Set port up for dma. + * @ap: Port to initialize + * + * Allocate space for PRD table using ata_bmdma_port_start(). + * Set PRD table address for PTERADD. (PRD Transfer End Read) + */ + +static int scc_port_start (struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + int rc; + + rc = ata_bmdma_port_start(ap); + if (rc) + return rc; + + out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma); + return 0; +} + +/** + * scc_port_stop - Undo scc_port_start() + * @ap: Port to shut down + * + * Reset PTERADD. + */ + +static void scc_port_stop (struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + out_be32(mmio + SCC_DMA_PTERADD, 0); +} + +static struct scsi_host_template scc_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations scc_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .set_piomode = scc_set_piomode, + .set_dmamode = scc_set_dmamode, + .mode_filter = scc_mode_filter, + + .sff_tf_load = scc_tf_load, + .sff_tf_read = scc_tf_read, + .sff_exec_command = scc_exec_command, + .sff_check_status = scc_check_status, + .sff_check_altstatus = scc_check_altstatus, + .sff_dev_select = scc_dev_select, + .sff_set_devctl = scc_set_devctl, + + .bmdma_setup = scc_bmdma_setup, + .bmdma_start = scc_bmdma_start, + .bmdma_stop = scc_bmdma_stop, + .bmdma_status = scc_bmdma_status, + .sff_data_xfer = scc_data_xfer, + + .cable_detect = ata_cable_80wire, + .softreset = scc_softreset, + .postreset = scc_postreset, + + .sff_irq_clear = scc_irq_clear, + + .port_start = scc_port_start, + .port_stop = scc_port_stop, +}; + +static struct ata_port_info scc_port_info[] = { + { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ + .udma_mask = ATA_UDMA6, + .port_ops = &scc_pata_ops, + }, +}; + +/** + * scc_reset_controller - initialize SCC PATA controller. 
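+ *
+ * (Editor's note: the sequence below enables the controller clocks,
+ * polls CCKCTRL until CRST reads back, asserts ATARESET, programs
+ * ECMODE/MODEREG and the interrupt mask, and finally checks PDIAG#
+ * to confirm an 80-conductor cable is present.)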
+ */ + +static int scc_reset_controller(struct ata_host *host) +{ + void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR]; + void __iomem *bmid_base = host->iomap[SCC_BMID_BAR]; + void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; + void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG; + void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE; + void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK; + void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS; + u32 reg = 0; + + out_be32(cckctrl_port, reg); + reg |= CCKCTRL_ATACLKOEN; + out_be32(cckctrl_port, reg); + reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; + out_be32(cckctrl_port, reg); + reg |= CCKCTRL_CRST; + out_be32(cckctrl_port, reg); + + for (;;) { + reg = in_be32(cckctrl_port); + if (reg & CCKCTRL_CRST) + break; + udelay(5000); + } + + reg |= CCKCTRL_ATARESET; + out_be32(cckctrl_port, reg); + out_be32(ecmode_port, ECMODE_VALUE); + out_be32(mode_port, MODE_JCUSFEN); + out_be32(intmask_port, INTMASK_MSK); + + if (in_be32(dmastatus_port) & QCHSD_STPDIAG) { + printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME); + return -EIO; + } + + return 0; +} + +/** + * scc_setup_ports - initialize ioaddr with SCC PATA port offsets. + * @ioaddr: IO address structure to be initialized + * @base: base address of BMID region + */ + +static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base) +{ + ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR; + ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; + ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; + ioaddr->bmdma_addr = base; + ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA; + ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR; + ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE; + ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT; + ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL; + ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM; + ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH; + ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE; + ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS; + ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD; +} + +static int scc_host_init(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + int rc; + + rc = scc_reset_controller(host); + if (rc) + return rc; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + + scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]); + + pci_set_master(pdev); + + return 0; +} + +/** + * scc_init_one - Register SCC PATA device with kernel services + * @pdev: PCI device to register + * @ent: Entry in scc_pci_tbl matching with @pdev + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. 
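+ *
+ * (Editor's note: setup is device-managed: pcim_enable_device() and
+ * pcim_iomap_regions() need no explicit teardown, and
+ * ata_host_activate() registers the shared-IRQ BMDMA handler.)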
+ */ + +static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int board_idx = (unsigned int) ent->driver_data; + const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL }; + struct ata_host *host; + int rc; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); + if (!host) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + + ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl"); + ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid"); + + rc = scc_host_init(host); + if (rc) + return rc; + + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &scc_sht); +} + +static struct pci_driver scc_pci_driver = { + .name = DRV_NAME, + .id_table = scc_pci_tbl, + .probe = scc_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(scc_pci_driver); + +MODULE_AUTHOR("Toshiba corp"); +MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, scc_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c new file mode 100644 index 00000000000..b920c3407f8 --- /dev/null +++ b/drivers/ata/pata_sch.c @@ -0,0 +1,180 @@ +/* + * pata_sch.c - Intel SCH PATA controllers + * + * Copyright (c) 2008 Alek Du <alek.du@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +/* + * Supports: + * Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at: + * http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/dmi.h> + +#define DRV_NAME "pata_sch" +#define DRV_VERSION "0.2" + +/* see SCH datasheet page 351 */ +enum { + D0TIM = 0x80, /* Device 0 Timing Register */ + D1TIM = 0x84, /* Device 1 Timing Register */ + PM = 0x07, /* PIO Mode Bit Mask */ + MDM = (0x03 << 8), /* Multi-word DMA Mode Bit Mask */ + UDM = (0x07 << 16), /* Ultra DMA Mode Bit Mask */ + PPE = (1 << 30), /* Prefetch/Post Enable */ + USD = (1 << 31), /* Use Synchronous DMA */ +}; + +static int sch_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); +static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev); +static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev); + +static const struct pci_device_id sch_pci_tbl[] = { + /* Intel SCH PATA Controller */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 }, + { } /* terminate list */ +}; + +static struct pci_driver sch_pci_driver = { + .name = DRV_NAME, + .id_table = sch_pci_tbl, + .probe = sch_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +static struct scsi_host_template sch_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations sch_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_unknown, + .set_piomode = sch_set_piomode, + .set_dmamode = sch_set_dmamode, +}; + +static struct ata_port_info sch_port_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + .port_ops = &sch_pata_ops, +}; + +MODULE_AUTHOR("Alek Du <alek.du@intel.com>"); +MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sch_pci_tbl); +MODULE_VERSION(DRV_VERSION); + +/** + * sch_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: ATA device + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + unsigned int pio = adev->pio_mode - XFER_PIO_0; + struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned int port = adev->devno ? D1TIM : D0TIM; + unsigned int data; + + pci_read_config_dword(dev, port, &data); + /* see SCH datasheet page 351 */ + /* set PIO mode */ + data &= ~(PM | PPE); + data |= pio; + /* enable PPE for block device */ + if (adev->class == ATA_DEV_ATA) + data |= PPE; + pci_write_config_dword(dev, port, data); +} + +/** + * sch_set_dmamode - Initialize host controller PATA DMA timings + * @ap: Port whose timings we are configuring + * @adev: ATA device + * + * Set MW/UDMA mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev) +{ + unsigned int dma_mode = adev->dma_mode; + struct pci_dev *dev = to_pci_dev(ap->host->dev); + unsigned int port = adev->devno ? 
D1TIM : D0TIM; + unsigned int data; + + pci_read_config_dword(dev, port, &data); + /* see SCH datasheet page 351 */ + if (dma_mode >= XFER_UDMA_0) { + /* enable Synchronous DMA mode */ + data |= USD; + data &= ~UDM; + data |= (dma_mode - XFER_UDMA_0) << 16; + } else { /* must be MWDMA mode, since we masked SWDMA already */ + data &= ~(USD | MDM); + data |= (dma_mode - XFER_MW_DMA_0) << 8; + } + pci_write_config_dword(dev, port, data); +} + +/** + * sch_init_one - Register SCH ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in sch_pci_tbl matching with @pdev + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. + */ + +static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct ata_port_info *ppi[] = { &sch_port_info, NULL }; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); +} + +module_pci_driver(sch_pci_driver); diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index a5c8d7e121d..fc5f31d4828 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -1,7 +1,7 @@ /* - * ata-serverworks.c - Serverworks PATA for new ATA layer + * pata_serverworks.c - Serverworks PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * (C) 2010 Bartlomiej Zolnierkiewicz * * based upon * @@ -34,14 +34,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_serverworks" -#define DRV_VERSION "0.3.7" +#define DRV_VERSION "0.4.3" #define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ #define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ @@ -58,31 +57,15 @@ static const char *csb_bad_ata100[] = { }; /** - * dell_cable - Dell serverworks cable detection + * oem_cable - Dell/Sun serverworks cable detection * @ap: ATA port to do cable detect * - * Dell hide the 40/80 pin select for their interfaces in the top two - * bits of the subsystem ID. + * Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select + * for their interfaces in the top two bits of the subsystem ID. */ -static int dell_cable(struct ata_port *ap) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - - if (pdev->subsystem_device & (1 << (ap->port_no + 14))) - return ATA_CBL_PATA80; - return ATA_CBL_PATA40; -} - -/** - * sun_cable - Sun Cobalt 'Alpine' cable detection - * @ap: ATA port to do cable select - * - * Cobalt CSB5 IDE hides the 40/80pin in the top two bits of the - * subsystem ID the same as dell. We could use one function but we may - * need to extend the Dell one in future - */ - -static int sun_cable(struct ata_port *ap) { +static int oem_cable(struct ata_port *ap) +{ struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (pdev->subsystem_device & (1 << (ap->port_no + 14))) @@ -90,61 +73,34 @@ static int sun_cable(struct ata_port *ap) { return ATA_CBL_PATA40; } -/** - * osb4_cable - OSB4 cable detect - * @ap: ATA port to check - * - * The OSB4 isn't UDMA66 capable so this is easy - */ - -static int osb4_cable(struct ata_port *ap) { - return ATA_CBL_PATA40; -} - -/** - * csb4_cable - CSB5/6 cable detect - * @ap: ATA port to check - * - * Serverworks default arrangement is to use the drive side detection - * only. 
- */ - -static int csb_cable(struct ata_port *ap) { - return ATA_CBL_PATA80; -} - struct sv_cable_table { int device; int subvendor; int (*cable_detect)(struct ata_port *ap); }; -/* - * Note that we don't copy the old serverworks code because the old - * code contains obvious mistakes - */ - static struct sv_cable_table cable_detect[] = { - { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable }, - { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable }, - { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable }, - { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable }, - { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable }, - { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable }, - { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable }, - { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable }, + { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, oem_cable }, + { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, oem_cable }, + { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, oem_cable }, + { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, ata_cable_40wire }, + { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, ata_cable_unknown }, + { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, ata_cable_unknown }, + { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, ata_cable_unknown }, + { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown }, { } }; /** - * serverworks_pre_reset - cable detection + * serverworks_cable_detect - cable detection * @ap: ATA port * * Perform cable detection according to the device and subvendor * identifications */ -static int serverworks_pre_reset(struct ata_port *ap) { +static int serverworks_cable_detect(struct ata_port *ap) +{ struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct sv_cable_table *cb = cable_detect; @@ -152,8 +108,7 @@ static int serverworks_pre_reset(struct ata_port *ap) { if (cb->device == pdev->device && (cb->subvendor == pdev->subsystem_vendor || cb->subvendor == PCI_ANY_ID)) { - ap->cbl = cb->cable_detect(ap); - return ata_std_prereset(ap); + return cb->cable_detect(ap); } cb++; } @@ -162,11 +117,6 @@ static int serverworks_pre_reset(struct ata_port *ap) { return -1; /* kill compiler warning */ } -static void serverworks_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - /** * serverworks_is_csb - Check for CSB or OSB * @pdev: PCI device to check @@ -191,58 +141,50 @@ static u8 serverworks_is_csb(struct pci_dev *pdev) /** * serverworks_osb4_filter - mode selection filter - * @ap: ATA interface * @adev: ATA device + * @mask: Mask of proposed modes * * Filter the offered modes for the device to apply controller * specific rules. OSB4 requires no UDMA for disks due to a FIFO * bug we hit. 
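 * (Editor's note: the returned mask feeds directly into libata mode
 * selection, so clearing ATA_MASK_UDMA leaves MWDMA and PIO available
 * for ATA disks, while non-disk devices keep their full mask.)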
*/ -static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask) { if (adev->class == ATA_DEV_ATA) mask &= ~ATA_MASK_UDMA; - return ata_pci_default_filter(ap, adev, mask); + return mask; } /** * serverworks_csb_filter - mode selection filter - * @ap: ATA interface * @adev: ATA device + * @mask: Mask of proposed modes * * Check the blacklist and disable UDMA5 if matched */ -static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) +static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask) { const char *p; - char model_num[40]; - int len, i; + char model_num[ATA_ID_PROD_LEN + 1]; + int i; /* Disk, UDMA */ if (adev->class != ATA_DEV_ATA) - return ata_pci_default_filter(ap, adev, mask); + return mask; /* Actually do need to check */ - ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); - /* Precuationary - why not do this in the libata core ?? */ - - len = strlen(model_num); - while ((len > 0) && (model_num[len - 1] == ' ')) { - len--; - model_num[len] = 0; - } + ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); - for(i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { - if (!strncmp(p, model_num, len)) - mask &= ~(0x1F << ATA_SHIFT_UDMA); + for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { + if (!strcmp(p, model_num)) + mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_pci_default_filter(ap, adev, mask); + return mask; } - /** * serverworks_set_piomode - set initial PIO mode data * @ap: ATA interface @@ -254,7 +196,7 @@ static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct at static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 }; - int offset = 1 + (2 * ap->port_no) - adev->devno; + int offset = 1 + 2 * ap->port_no - adev->devno; int devbits = (2 * ap->port_no + adev->devno) * 4; u16 csb5_pio; struct pci_dev *pdev = to_pci_dev(ap->host->dev); @@ -267,7 +209,7 @@ static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev if (serverworks_is_csb(pdev)) { pci_read_config_word(pdev, 0x4A, &csb5_pio); csb5_pio &= ~(0x0F << devbits); - pci_write_config_byte(pdev, 0x4A, csb5_pio | (pio << devbits)); + pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits)); } } @@ -285,117 +227,45 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev { static const u8 dma_mode[] = { 0x77, 0x21, 0x20 }; int offset = 1 + 2 * ap->port_no - adev->devno; - int devbits = (2 * ap->port_no + adev->devno); + int devbits = 2 * ap->port_no + adev->devno; u8 ultra; u8 ultra_cfg; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_byte(pdev, 0x54, &ultra_cfg); + pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra); + ultra &= ~(0x0F << (adev->devno * 4)); if (adev->dma_mode >= XFER_UDMA_0) { pci_write_config_byte(pdev, 0x44 + offset, 0x20); - pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra); - ultra &= ~(0x0F << (ap->port_no * 4)); ultra |= (adev->dma_mode - XFER_UDMA_0) - << (ap->port_no * 4); - pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra); - + << (adev->devno * 4); ultra_cfg |= (1 << devbits); } else { pci_write_config_byte(pdev, 0x44 + offset, dma_mode[adev->dma_mode - XFER_MW_DMA_0]); ultra_cfg &= ~(1 << devbits); } + pci_write_config_byte(pdev, 0x56 + 
ap->port_no, ultra); pci_write_config_byte(pdev, 0x54, ultra_cfg); } static struct scsi_host_template serverworks_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations serverworks_osb4_port_ops = { - .port_disable = ata_port_disable, + .inherits = &ata_bmdma_port_ops, + .cable_detect = serverworks_cable_detect, + .mode_filter = serverworks_osb4_filter, .set_piomode = serverworks_set_piomode, .set_dmamode = serverworks_set_dmamode, - .mode_filter = serverworks_osb4_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = serverworks_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static struct ata_port_operations serverworks_csb_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = serverworks_set_piomode, - .set_dmamode = serverworks_set_dmamode, + .inherits = &serverworks_osb4_port_ops, .mode_filter = serverworks_csb_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = serverworks_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop }; static int serverworks_fixup_osb4(struct pci_dev *pdev) @@ -413,17 +283,14 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev) pci_dev_put(isa_dev); return 0; } - printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n"); + printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n"); return -ENODEV; } static int serverworks_fixup_csb(struct pci_dev *pdev) { - u8 rev; u8 btr; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - /* Third Channel Test */ if (!(PCI_FUNC(pdev->devfn) & 1)) { struct pci_dev * findev = NULL; @@ -465,7 +332,7 @@ static int serverworks_fixup_csb(struct pci_dev *pdev) if (!(PCI_FUNC(pdev->devfn) & 1)) btr |= 0x2; else - btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2; + btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 
0x3 : 0x2; pci_write_config_byte(pdev, 0x5A, btr); return btr; @@ -481,52 +348,75 @@ static void serverworks_fixup_ht1000(struct pci_dev *pdev) pci_write_config_byte(pdev, 0x5A, btr); } +static int serverworks_fixup(struct pci_dev *pdev) +{ + int rc = 0; + + /* Force master latency timer to 64 PCI clocks */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); + + switch (pdev->device) { + case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE: + rc = serverworks_fixup_osb4(pdev); + break; + case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: + ata_pci_bmdma_clear_simplex(pdev); + /* fall through */ + case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: + case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: + rc = serverworks_fixup_csb(pdev); + break; + case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE: + serverworks_fixup_ht1000(pdev); + break; + } + + return rc; +} static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int ports = 2; - static struct ata_port_info info[4] = { + static const struct ata_port_info info[4] = { { /* OSB4 */ - .sht = &serverworks_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x07, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &serverworks_osb4_port_ops }, { /* OSB4 no UDMA */ - .sht = &serverworks_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x00, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + /* No UDMA */ .port_ops = &serverworks_osb4_port_ops }, { /* CSB5 */ - .sht = &serverworks_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &serverworks_csb_port_ops }, { /* CSB5 - later revisions*/ - .sht = &serverworks_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &serverworks_csb_port_ops } }; - static struct ata_port_info *port_info[2]; - struct ata_port_info *devinfo = &info[id->driver_data]; + const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; + int rc; - /* Force master latency timer to 64 PCI clocks */ - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = serverworks_fixup(pdev); /* OSB4 : South Bridge and IDE */ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { /* Select non UDMA capable OSB4 if we can't do fixups */ - if ( serverworks_fixup_osb4(pdev) < 0) - devinfo = &info[1]; + if (rc < 0) + ppi[0] = &info[1]; } /* setup CSB5/CSB6 : South Bridge and IDE option RAID */ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) || @@ -535,57 +425,59 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id /* If the returned btr is the newer revision then select the right info block */ - if (serverworks_fixup_csb(pdev) == 3) - devinfo = &info[3]; + if (rc == 3) + ppi[0] = &info[3]; /* Is this the 3rd channel CSB6 IDE ? 
*/ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) - ports = 1; + ppi[1] = &ata_dummy_port_info; } - /* setup HT1000E */ - else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) - serverworks_fixup_ht1000(pdev); - if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) - ata_pci_clear_simplex(pdev); + return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int serverworks_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + (void)serverworks_fixup(pdev); - port_info[0] = port_info[1] = devinfo; - return ata_pci_init_one(pdev, port_info, ports); + ata_host_resume(host); + return 0; } +#endif + +static const struct pci_device_id serverworks[] = { + { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, + { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2}, + { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2}, + { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2}, + { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2}, -static struct pci_device_id serverworks[] = { - { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, - { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2}, - { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2}, - { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2}, - { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2}, - { 0, }, + { }, }; static struct pci_driver serverworks_pci_driver = { .name = DRV_NAME, .id_table = serverworks, .probe = serverworks_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = serverworks_reinit_one, +#endif }; -static int __init serverworks_init(void) -{ - return pci_register_driver(&serverworks_pci_driver); -} - - -static void __exit serverworks_exit(void) -{ - pci_unregister_driver(&serverworks_pci_driver); -} - +module_pci_driver(serverworks_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, serverworks); MODULE_VERSION(DRV_VERSION); - -module_init(serverworks_init); -module_exit(serverworks_exit); diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index c8b2e26db70..f597edccede 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -1,7 +1,6 @@ /* * pata_sil680.c - SIL680 PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> * * based upon * @@ -12,11 +11,11 @@ * * May be copied or modified under the terms of the GNU General Public License * - * Documentation publically available. + * Documentation publicly available. 
* * If you have strange problems with nVidia chipset systems please * see the SI support documentation and update your system BIOS - * if neccessary + * if necessary * * TODO * If we know all our devices are LBA28 (or LBA28 sized) we could use @@ -26,23 +25,25 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_sil680" -#define DRV_VERSION "0.3.2" +#define DRV_VERSION "0.4.9" + +#define SIL680_MMIO_BAR 5 /** * sil680_selreg - return register base - * @hwif: interface + * @ap: ATA interface * @r: config offset * - * Turn a config register offset into the right address in either - * PCI space or MMIO space to access the control register in question - * Thankfully this is a configuration operation so isnt performance + * Turn a config register offset into the right address in PCI space + * to access the control register in question. + * + * Thankfully this is a configuration operation so isn't performance * criticial. */ @@ -55,12 +56,12 @@ static unsigned long sil680_selreg(struct ata_port *ap, int r) /** * sil680_seldev - return register base - * @hwif: interface + * @ap: ATA interface * @r: config offset * - * Turn a config register offset into the right address in either - * PCI space or MMIO space to access the control register in question - * including accounting for the unit shift. + * Turn a config register offset into the right address in PCI space + * to access the control register in question including accounting for + * the unit shift. */ static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r) @@ -80,7 +81,8 @@ static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, * space for us. 
*/ -static int sil680_cable_detect(struct ata_port *ap) { +static int sil680_cable_detect(struct ata_port *ap) +{ struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned long addr = sil680_selreg(ap, 0); u8 ata66; @@ -91,39 +93,8 @@ static int sil680_cable_detect(struct ata_port *ap) { return ATA_CBL_PATA40; } -static int sil680_pre_reset(struct ata_port *ap) -{ - ap->cbl = sil680_cable_detect(ap); - return ata_std_prereset(ap); -} - -/** - * sil680_bus_reset - reset the SIL680 bus - * @ap: ATA port to reset - * - * Perform the SIL680 housekeeping when doing an ATA bus reset - */ - -static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes) -{ - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - unsigned long addr = sil680_selreg(ap, 0); - u8 reset; - - pci_read_config_byte(pdev, addr, &reset); - pci_write_config_byte(pdev, addr, reset | 0x03); - udelay(25); - pci_write_config_byte(pdev, addr, reset); - return ata_std_softreset(ap, classes); -} - -static void sil680_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, sil680_pre_reset, sil680_bus_reset, NULL, ata_std_postreset); -} - /** - * sil680_set_piomode - set initial PIO mode data + * sil680_set_piomode - set PIO mode data * @ap: ATA interface * @adev: ATA device * @@ -134,15 +105,22 @@ static void sil680_error_handler(struct ata_port *ap) static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev) { - static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 }; - static u16 speed_t[5] = { 0x328A, 0x1281, 0x1281, 0x10C3, 0x10C1 }; + static const u16 speed_p[5] = { + 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 + }; + static const u16 speed_t[5] = { + 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1 + }; unsigned long tfaddr = sil680_selreg(ap, 0x02); unsigned long addr = sil680_seldev(ap, adev, 0x04); + unsigned long addr_mask = 0x80 + 4 * ap->port_no; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int pio = adev->pio_mode - XFER_PIO_0; int lowest_pio = pio; + int port_shift = 4 * adev->devno; u16 reg; + u8 mode; struct ata_device *pair = ata_dev_pair(adev); @@ -153,29 +131,37 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev) pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]); pci_read_config_word(pdev, tfaddr-2, ®); + pci_read_config_byte(pdev, addr_mask, &mode); + reg &= ~0x0200; /* Clear IORDY */ - if (ata_pio_need_iordy(adev)) + mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */ + + if (ata_pio_need_iordy(adev)) { reg |= 0x0200; /* Enable IORDY */ + mode |= 1 << port_shift; + } pci_write_config_word(pdev, tfaddr-2, reg); + pci_write_config_byte(pdev, addr_mask, mode); } /** - * sil680_set_dmamode - set initial DMA mode data + * sil680_set_dmamode - set DMA mode data * @ap: ATA interface * @adev: ATA device * - * Program the MWDMA/UDMA modes for the sil680 k - * chipset. The MWDMA mode values are pulled from a lookup table + * Program the MWDMA/UDMA modes for the sil680 chipset. + * + * The MWDMA mode values are pulled from a lookup table * while the chipset uses mode number for UDMA. 
*/ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - static u8 ultra_table[2][7] = { + static const u8 ultra_table[2][7] = { { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */ { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133Mhz */ }; - static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 }; + static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned long ma = sil680_seldev(ap, adev, 0x08); @@ -195,7 +181,7 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) mode &= ~(0x03 << port_shift); /* Extract scsc */ - scsc = (scsc & 0x30) ? 1: 0; + scsc = (scsc & 0x30) ? 1 : 0; if (adev->dma_mode >= XFER_UDMA_0) { multi = 0x10C1; @@ -210,120 +196,103 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) pci_write_config_word(pdev, ua, ultra); } +/** + * sil680_sff_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA command, with proper synchronization with interrupt + * handler / other threads. Use our MMIO space for PCI posting to avoid + * a hideously slow cycle all the way to the device. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static void sil680_sff_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); + iowrite8(tf->command, ap->ioaddr.command_addr); + ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + +static bool sil680_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long addr = sil680_selreg(ap, 1); + u8 val; + + pci_read_config_byte(pdev, addr, &val); + + return val & 0x08; +} + static struct scsi_host_template sil680_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; + static struct ata_port_operations sil680_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = sil680_set_piomode, - .set_dmamode = sil680_set_dmamode, - .mode_filter = ata_pci_default_filter, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sil680_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .inherits = &ata_bmdma32_port_ops, + .sff_exec_command = sil680_sff_exec_command, + .sff_irq_check = sil680_sff_irq_check, + .cable_detect = sil680_cable_detect, + .set_piomode = sil680_set_piomode, + .set_dmamode = sil680_set_dmamode, }; -static int 
sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +/** + * sil680_init_chip - chip setup + * @pdev: PCI device + * + * Perform all the chip setup which must be done both when the device + * is powered up on boot and when we resume in case we resumed from RAM. + * Returns the final clock settings. + */ + +static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio) { - static struct ata_port_info info = { - .sht = &sil680_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, - .port_ops = &sil680_port_ops - }; - static struct ata_port_info info_slow = { - .sht = &sil680_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, - .port_ops = &sil680_port_ops - }; - static struct ata_port_info *port_info[2] = {&info, &info}; - static int printed_version; - u32 class_rev = 0; u8 tmpbyte = 0; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - - pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); - class_rev &= 0xff; - /* FIXME: double check */ - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255); + /* FIXME: double check */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + pdev->revision ? 1 : 255); pci_write_config_byte(pdev, 0x80, 0x00); pci_write_config_byte(pdev, 0x84, 0x00); pci_read_config_byte(pdev, 0x8A, &tmpbyte); - printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n", - tmpbyte & 1, tmpbyte & 0x30); - - switch(tmpbyte & 0x30) { - case 0x00: - /* 133 clock attempt to force it on */ - pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10); - break; - case 0x30: - /* if clocking is disabled */ - /* 133 clock attempt to force it on */ - pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20); - break; - case 0x10: - /* 133 already */ - break; - case 0x20: - /* BIOS set PCI x2 clocking */ - break; + dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n", + tmpbyte & 1, tmpbyte & 0x30); + + *try_mmio = 0; +#ifdef CONFIG_PPC + if (machine_is(cell)) + *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5); +#endif + + switch (tmpbyte & 0x30) { + case 0x00: + /* 133 clock attempt to force it on */ + pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10); + break; + case 0x30: + /* if clocking is disabled */ + /* 133 clock attempt to force it on */ + pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20); + break; + case 0x10: + /* 133 already */ + break; + case 0x20: + /* BIOS set PCI x2 clocking */ + break; } pci_read_config_byte(pdev, 0x8A, &tmpbyte); - printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n", - tmpbyte & 1, tmpbyte & 0x30); - if ((tmpbyte & 0x30) == 0) - port_info[0] = port_info[1] = &info_slow; + dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n", + tmpbyte & 1, tmpbyte & 0x30); pci_write_config_byte(pdev, 0xA1, 0x72); pci_write_config_word(pdev, 0xA2, 0x328A); @@ -336,46 +305,140 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_write_config_dword(pdev, 0xB8, 0x43924392); pci_write_config_dword(pdev, 0xBC, 0x40094009); - switch(tmpbyte & 0x30) { - case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break; - case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break; - case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break; - /* This last case is _NOT_ ok */ - case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n"); - return -EIO; + switch (tmpbyte & 0x30) { + case 0x00: + printk(KERN_INFO "sil680: 100MHz clock.\n"); + 
break; + case 0x10: + printk(KERN_INFO "sil680: 133MHz clock.\n"); + break; + case 0x20: + printk(KERN_INFO "sil680: Using PCI clock.\n"); + break; + /* This last case is _NOT_ ok */ + case 0x30: + printk(KERN_ERR "sil680: Clock disabled ?\n"); } - return ata_pci_init_one(pdev, port_info, 2); + return tmpbyte & 0x30; } +static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &sil680_port_ops + }; + static const struct ata_port_info info_slow = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, + .port_ops = &sil680_port_ops + }; + const struct ata_port_info *ppi[] = { &info, NULL }; + struct ata_host *host; + void __iomem *mmio_base; + int rc, try_mmio; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + switch (sil680_init_chip(pdev, &try_mmio)) { + case 0: + ppi[0] = &info_slow; + break; + case 0x30: + return -ENODEV; + } + + if (!try_mmio) + goto use_ioports; + + /* Try to acquire MMIO resources and fallback to PIO if + * that fails + */ + rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); + if (rc) + goto use_ioports; + + /* Allocate host and set it up */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) + return -ENOMEM; + host->iomap = pcim_iomap_table(pdev); + + /* Setup DMA masks */ + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + pci_set_master(pdev); + + /* Get MMIO base and initialize port addresses */ + mmio_base = host->iomap[SIL680_MMIO_BAR]; + host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00; + host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80; + host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a; + host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a; + ata_sff_std_ports(&host->ports[0]->ioaddr); + host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08; + host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0; + host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca; + host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca; + ata_sff_std_ports(&host->ports[1]->ioaddr); + + /* Register & activate */ + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &sil680_sht); + +use_ioports: + return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int sil680_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int try_mmio, rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + sil680_init_chip(pdev, &try_mmio); + ata_host_resume(host); + return 0; +} +#endif + static const struct pci_device_id sil680[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), }, - { 0, }, + { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), }, + + { }, }; static struct pci_driver sil680_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = sil680, .probe = sil680_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = sil680_reinit_one, +#endif }; -static int __init sil680_init(void) -{ - return pci_register_driver(&sil680_pci_driver); -} - - -static void __exit sil680_exit(void) -{ - pci_unregister_driver(&sil680_pci_driver); -} - 
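The hunk above swaps the open-coded init/exit pair for module_pci_driver(). For readers unfamiliar with the helper, it expands to essentially the code being deleted; a rough sketch of the equivalent (simplified, not the literal macro expansion):

static int __init sil680_pci_driver_init(void)
{
	/* register the PCI driver when the module loads */
	return pci_register_driver(&sil680_pci_driver);
}
module_init(sil680_pci_driver_init);

static void __exit sil680_pci_driver_exit(void)
{
	/* unregister on module unload */
	pci_unregister_driver(&sil680_pci_driver);
}
module_exit(sil680_pci_driver_exit);

The macro spares every driver from repeating this boilerplate, which is exactly the cleanup this series applies file by file.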
+module_pci_driver(sil680_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for SI680 PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sil680); MODULE_VERSION(DRV_VERSION); - -module_init(sil680_init); -module_exit(sil680_exit); diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index 17791e2785f..626f989d5c6 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -1,7 +1,8 @@ /* * pata_sis.c - SiS ATA driver * - * (C) 2005 Red Hat <alan@redhat.com> + * (C) 2005 Red Hat + * (C) 2007,2009 Bartlomiej Zolnierkiewicz * * Based upon linux/drivers/ide/pci/sis5513.c * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> @@ -25,171 +26,163 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> +#include "sis.h" #define DRV_NAME "pata_sis" -#define DRV_VERSION "0.4.4" +#define DRV_VERSION "0.5.2" struct sis_chipset { - u16 device; /* PCI host ID */ - struct ata_port_info *info; /* Info block */ + u16 device; /* PCI host ID */ + const struct ata_port_info *info; /* Info block */ /* Probably add family, cable detect type etc here to clean up code later */ }; +struct sis_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +static const struct sis_laptop sis_laptop[] = { + /* devid, subvendor, subdev */ + { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ + { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */ + { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */ + /* end marker */ + { 0, } +}; + +static int sis_short_ata40(struct pci_dev *dev) +{ + const struct sis_laptop *lap = &sis_laptop[0]; + + while (lap->device) { + if (lap->device == dev->device && + lap->subvendor == dev->subsystem_vendor && + lap->subdevice == dev->subsystem_device) + return 1; + lap++; + } + + return 0; +} + /** - * sis_port_base - return PCI configuration base for dev + * sis_old_port_base - return PCI configuration base for dev * @adev: device * * Returns the base of the PCI configuration registers for this port * number. */ -static int sis_port_base(struct ata_device *adev) +static int sis_old_port_base(struct ata_device *adev) { - return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno); + return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno); } /** - * sis_133_pre_reset - check for 40/80 pin - * @ap: Port + * sis_port_base - return PCI configuration base for dev + * @adev: device * - * Perform cable detection for the later UDMA133 capable - * SiS chipset. + * Returns the base of the PCI configuration registers for this port + * number. 
*/ -static int sis_133_pre_reset(struct ata_port *ap) +static int sis_port_base(struct ata_device *adev) { - static const struct pci_bits sis_enable_bits[] = { - { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ - { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ - }; - + struct ata_port *ap = adev->link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u16 tmp; - - if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) - return -ENOENT; + int port = 0x40; + u32 reg54; - /* The top bit of this register is the cable detect bit */ - pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp); - if (tmp & 0x8000) - ap->cbl = ATA_CBL_PATA40; - else - ap->cbl = ATA_CBL_PATA80; + /* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */ + pci_read_config_dword(pdev, 0x54, ®54); + if (reg54 & 0x40000000) + port = 0x70; - return ata_std_prereset(ap); + return port + (8 * ap->port_no) + (4 * adev->devno); } /** - * sis_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe + * sis_133_cable_detect - check for 40/80 pin + * @ap: Port + * @deadline: deadline jiffies for the operation * - * LOCKING: - * None (inherited from caller). + * Perform cable detection for the later UDMA133 capable + * SiS chipset. */ -static void sis_133_error_handler(struct ata_port *ap) +static int sis_133_cable_detect(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, sis_133_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u16 tmp; + /* The top bit of this register is the cable detect bit */ + pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp); + if ((tmp & 0x8000) && !sis_short_ata40(pdev)) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} /** - * sis_66_pre_reset - check for 40/80 pin + * sis_66_cable_detect - check for 40/80 pin * @ap: Port * * Perform cable detection on the UDMA66, UDMA100 and early UDMA133 * SiS IDE controllers. */ -static int sis_66_pre_reset(struct ata_port *ap) +static int sis_66_cable_detect(struct ata_port *ap) { - static const struct pci_bits sis_enable_bits[] = { - { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ - { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ - }; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 tmp; - if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) { - ata_port_disable(ap); - printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); - return 0; - } /* Older chips keep cable detect in bits 4/5 of reg 0x48 */ pci_read_config_byte(pdev, 0x48, &tmp); tmp >>= ap->port_no; - if (tmp & 0x10) - ap->cbl = ATA_CBL_PATA40; - else - ap->cbl = ATA_CBL_PATA80; - - return ata_std_prereset(ap); + if ((tmp & 0x10) && !sis_short_ata40(pdev)) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; } -/** - * sis_66_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe - * @classes: - * - * LOCKING: - * None (inherited from caller). 
- */ - -static void sis_66_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, sis_66_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} /** - * sis_old_pre_reset - probe begin - * @ap: ATA port + * sis_pre_reset - probe begin + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int sis_old_pre_reset(struct ata_port *ap) +static int sis_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits sis_enable_bits[] = { { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ }; + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) { - ata_port_disable(ap); - printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); - return 0; - } - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - - -/** - * sis_old_error_handler - Probe specified port on PATA host controller - * @ap: Port to probe - * - * LOCKING: - * None (inherited from caller). - */ + if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) + return -ENOENT; -static void sis_old_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, sis_old_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + /* Clear the FIFO settings. We can't enable the FIFO until + we know we are poking at a disk */ + pci_write_config_byte(pdev, 0x4B, 0); + return ata_sff_prereset(link, deadline); } + /** - * sis_set_fifo - Set RWP fifo bits for this device + * sis_set_fifo - Set RWP fifo bits for this device * @ap: Port * @adev: Device * @@ -232,13 +225,13 @@ static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev) static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int port = sis_port_base(adev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int port = sis_old_port_base(adev); u8 t1, t2; int speed = adev->pio_mode - XFER_PIO_0; - const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 }; - const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 }; + static const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 }; + static const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 }; sis_set_fifo(ap, adev); @@ -256,7 +249,7 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev) } /** - * sis_100_set_pioode - Initialize host controller PATA PIO timings + * sis_100_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring for. * @@ -269,11 +262,11 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev) static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int port = sis_port_base(adev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int port = sis_old_port_base(adev); int speed = adev->pio_mode - XFER_PIO_0; - const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 }; + static const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 }; sis_set_fifo(ap, adev); @@ -281,7 +274,7 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev) } /** - * sis_133_set_pioode - Initialize host controller PATA PIO timings + * sis_133_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device we are configuring for. 
* @@ -294,20 +287,19 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev) static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int port = 0x40; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int port; u32 t1; - u32 reg54; int speed = adev->pio_mode - XFER_PIO_0; - const u32 timing133[] = { + static const u32 timing133[] = { 0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */ 0x0C266000, 0x04263000, 0x0C0A3000, 0x05093000 }; - const u32 timing100[] = { + static const u32 timing100[] = { 0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */ 0x091C4000, 0x031C2000, @@ -317,12 +309,7 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev) sis_set_fifo(ap, adev); - /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */ - pci_read_config_dword(pdev, 0x54, ®54); - if (reg54 & 0x40000000) - port = 0x70; - port += 8 * ap->port_no + 4 * adev->devno; - + port = sis_port_base(adev); pci_read_config_dword(pdev, port, &t1); t1 &= 0xC0C00FFF; /* Mask out timing */ @@ -348,28 +335,28 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev) static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); int speed = adev->dma_mode - XFER_MW_DMA_0; - int drive_pci = sis_port_base(adev); + int drive_pci = sis_old_port_base(adev); u16 timing; - const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 }; - const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 }; + static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; + static const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 }; pci_read_config_word(pdev, drive_pci, &timing); if (adev->dma_mode < XFER_UDMA_0) { /* bits 3-0 hold recovery timing bits 8-10 active timing and - the higer bits are dependant on the device */ - timing &= ~ 0x870F; + the higher bits are dependent on the device */ + timing &= ~0x870F; timing |= mwdma_bits[speed]; - pci_write_config_word(pdev, drive_pci, timing); } else { /* Bit 15 is UDMA on/off, bit 13-14 are cycle time */ speed = adev->dma_mode - XFER_UDMA_0; timing &= ~0x6000; timing |= udma_bits[speed]; } + pci_write_config_word(pdev, drive_pci, timing); } /** @@ -387,25 +374,26 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev) static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); int speed = adev->dma_mode - XFER_MW_DMA_0; - int drive_pci = sis_port_base(adev); + int drive_pci = sis_old_port_base(adev); u16 timing; - const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 }; - const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000}; + /* MWDMA 0-2 and UDMA 0-5 */ + static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; + static const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 }; pci_read_config_word(pdev, drive_pci, &timing); if (adev->dma_mode < XFER_UDMA_0) { /* bits 3-0 hold recovery timing bits 8-10 active timing and - the higer bits are dependant on the device, bit 15 udma */ - timing &= ~ 0x870F; + the higher bits are dependent on the device, bit 15 udma */ + timing &= ~0x870F; timing |= mwdma_bits[speed]; } else { /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */ speed = adev->dma_mode - XFER_UDMA_0; - timing &= ~0x6000; + timing &= ~0xF000; 
timing |= udma_bits[speed]; } pci_write_config_word(pdev, drive_pci, timing); @@ -425,24 +413,24 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev) static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); int speed = adev->dma_mode - XFER_MW_DMA_0; - int drive_pci = sis_port_base(adev); - u16 timing; + int drive_pci = sis_old_port_base(adev); + u8 timing; - const u16 udma_bits[] = { 0x8B00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100}; + static const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81}; - pci_read_config_word(pdev, drive_pci, &timing); + pci_read_config_byte(pdev, drive_pci + 1, &timing); if (adev->dma_mode < XFER_UDMA_0) { /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */ } else { - /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */ + /* Bit 7 is UDMA on/off, bit 0-3 are cycle time */ speed = adev->dma_mode - XFER_UDMA_0; - timing &= ~0x0F00; + timing &= ~0x8F; timing |= udma_bits[speed]; } - pci_write_config_word(pdev, drive_pci, timing); + pci_write_config_byte(pdev, drive_pci + 1, timing); } /** @@ -451,8 +439,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev) * @adev: Device to program * * Set UDMA/MWDMA mode for device, in host controller PCI config space. - * Handles early SiS 961 bridges. Supports MWDMA as well unlike - * the old ide/pci driver. + * Handles early SiS 961 bridges. * * LOCKING: * None (inherited from caller). @@ -460,24 +447,24 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev) static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); int speed = adev->dma_mode - XFER_MW_DMA_0; - int drive_pci = sis_port_base(adev); - u16 timing; + int drive_pci = sis_old_port_base(adev); + u8 timing; + /* Low 4 bits are timing */ + static const u8 udma_bits[] = { 0x8F, 0x8A, 0x87, 0x85, 0x83, 0x82, 0x81}; - const u16 udma_bits[] = { 0x8F00, 0x8A00, 0x8700, 0x8500, 0x8300, 0x8200, 0x8100}; - - pci_read_config_word(pdev, drive_pci, &timing); + pci_read_config_byte(pdev, drive_pci + 1, &timing); if (adev->dma_mode < XFER_UDMA_0) { /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */ } else { - /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */ + /* Bit 7 is UDMA on/off, bit 0-3 are cycle time */ speed = adev->dma_mode - XFER_UDMA_0; - timing &= ~0x0F00; + timing &= ~0x8F; timing |= udma_bits[speed]; } - pci_write_config_word(pdev, drive_pci, timing); + pci_write_config_byte(pdev, drive_pci + 1, timing); } /** @@ -486,8 +473,6 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a * @adev: Device to program * * Set UDMA/MWDMA mode for device, in host controller PCI config space. - * Handles early SiS 961 bridges. Supports MWDMA as well unlike - * the old ide/pci driver. * * LOCKING: * None (inherited from caller). 
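The two set_dmamode hunks above narrow the early-SiS-961 UDMA timing updates from 16-bit accesses at drive_pci to 8-bit accesses at drive_pci + 1: bit 7 of that byte gates UDMA on and off, the low nibble carries the cycle time, so only the high byte of the old word was ever being changed. A minimal sketch of the shared read-modify-write pattern (the helper name is hypothetical, not part of the patch):

static void sis_update_udma_byte(struct pci_dev *pdev, int drive_pci,
				 const u8 *udma_bits, int speed)
{
	u8 timing;

	/* UDMA enable and cycle time live in the high byte of the old word */
	pci_read_config_byte(pdev, drive_pci + 1, &timing);
	timing &= ~0x8F;		/* clear bit 7 (UDMA on/off) and bits 0-3 */
	timing |= udma_bits[speed];	/* e.g. 0x8B for UDMA0 in the UDMA100 table */
	pci_write_config_byte(pdev, drive_pci + 1, timing);
}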
@@ -495,32 +480,34 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int speed = adev->dma_mode - XFER_MW_DMA_0; - int port = 0x40; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int port; u32 t1; - u32 reg54; - - /* bits 4- cycle time 8 - cvs time */ - const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 }; - const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 }; - - /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */ - pci_read_config_dword(pdev, 0x54, ®54); - if (reg54 & 0x40000000) - port = 0x70; - port += (8 * ap->port_no) + (4 * adev->devno); + port = sis_port_base(adev); pci_read_config_dword(pdev, port, &t1); if (adev->dma_mode < XFER_UDMA_0) { + /* Recovery << 24 | Act << 16 | Ini << 12, like PIO modes */ + static const u32 timing_u100[] = { 0x19154000, 0x06072000, 0x04062000 }; + static const u32 timing_u133[] = { 0x221C6000, 0x0C0A3000, 0x05093000 }; + int speed = adev->dma_mode - XFER_MW_DMA_0; + + t1 &= 0xC0C00FFF; + /* disable UDMA */ t1 &= ~0x00000004; - /* FIXME: need data sheet to add MWDMA here. Also lacking on - ide/pci driver */ + if (t1 & 0x08) + t1 |= timing_u133[speed]; + else + t1 |= timing_u100[speed]; } else { - speed = adev->dma_mode - XFER_UDMA_0; - /* if & 8 no UDMA133 - need info for ... */ + /* bits 4- cycle time 8 - cvs time */ + static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 }; + static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 }; + int speed = adev->dma_mode - XFER_UDMA_0; + t1 &= ~0x00000FF0; + /* enable UDMA */ t1 |= 0x00000004; if (t1 & 0x08) t1 |= timing_u133[speed]; @@ -530,242 +517,138 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev) pci_write_config_dword(pdev, port, t1); } +/** + * sis_133_mode_filter - mode selection filter + * @adev: ATA device + * + * Block UDMA6 on devices that do not support it. 
+ */ + +static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask) +{ + struct ata_port *ap = adev->link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int port = sis_port_base(adev); + u32 t1; + + pci_read_config_dword(pdev, port, &t1); + /* if ATA133 is disabled, mask it out */ + if (!(t1 & 0x08)) + mask &= ~(0xC0 << ATA_SHIFT_UDMA); + return mask; +} + static struct scsi_host_template sis_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations sis_133_for_sata_ops = { + .inherits = &ata_bmdma_port_ops, + .set_piomode = sis_133_set_piomode, + .set_dmamode = sis_133_set_dmamode, + .cable_detect = sis_133_cable_detect, }; -static const struct ata_port_operations sis_133_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations sis_base_ops = { + .inherits = &ata_bmdma_port_ops, + .prereset = sis_pre_reset, +}; + +static struct ata_port_operations sis_133_ops = { + .inherits = &sis_base_ops, .set_piomode = sis_133_set_piomode, .set_dmamode = sis_133_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sis_133_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .cable_detect = sis_133_cable_detect, + .mode_filter = sis_133_mode_filter, }; -static const struct ata_port_operations sis_133_early_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations sis_133_early_ops = { + .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_133_early_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sis_66_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .cable_detect = sis_66_cable_detect, }; -static const struct ata_port_operations sis_100_ops = { - .port_disable = ata_port_disable, +static struct 
ata_port_operations sis_100_ops = { + .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_100_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sis_66_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .cable_detect = sis_66_cable_detect, }; -static const struct ata_port_operations sis_66_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations sis_66_ops = { + .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_66_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sis_66_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .cable_detect = sis_66_cable_detect, }; -static const struct ata_port_operations sis_old_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations sis_old_ops = { + .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_old_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = sis_old_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .cable_detect = ata_cable_40wire, }; -static struct ata_port_info sis_info = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, - .udma_mask = 0, +static const struct ata_port_info sis_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + /* No UDMA */ .port_ops = &sis_old_ops, }; -static struct ata_port_info sis_info33 = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, - .udma_mask = ATA_UDMA2, /* 
UDMA 33 */ +static const struct ata_port_info sis_info33 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &sis_old_ops, }; -static struct ata_port_info sis_info66 = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = ATA_UDMA4, /* UDMA 66 */ +static const struct ata_port_info sis_info66 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ + .udma_mask = ATA_UDMA4, .port_ops = &sis_66_ops, }; -static struct ata_port_info sis_info100 = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ +static const struct ata_port_info sis_info100 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ .udma_mask = ATA_UDMA5, .port_ops = &sis_100_ops, }; -static struct ata_port_info sis_info100_early = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, +static const struct ata_port_info sis_info100_early = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ .udma_mask = ATA_UDMA5, - .pio_mask = 0x1f, /* pio0-4 */ .port_ops = &sis_66_ops, }; -static struct ata_port_info sis_info133 = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ +static const struct ata_port_info sis_info133 = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &sis_133_ops, }; -static struct ata_port_info sis_info133_early = { - .sht = &sis_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, /* pio0-4 */ +const struct ata_port_info sis_info133_for_sata = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ + .udma_mask = ATA_UDMA6, + .port_ops = &sis_133_for_sata_ops, +}; +static const struct ata_port_info sis_info133_early = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ .udma_mask = ATA_UDMA6, .port_ops = &sis_133_early_ops, }; +/* Privately shared with the SiS180 SATA driver, not for use elsewhere */ +EXPORT_SYMBOL_GPL(sis_info133_for_sata); static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis) { @@ -826,7 +709,7 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis) * @pdev: PCI device to register * @ent: Entry in sis_pci_tbl matching with @pdev * - * Called from kernel PCI layer. We probe for combined mode (sigh), + * Called from kernel PCI layer. We probe for combined mode (sigh), * and then hand over control to libata, for it to do the rest. 
* * LOCKING: @@ -838,14 +721,14 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis) static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - static struct ata_port_info *port_info[2]; - struct ata_port_info *port; + const struct ata_port_info *ppi[] = { NULL, NULL }; struct pci_dev *host = NULL; struct sis_chipset *chipset = NULL; + struct sis_chipset *sets; + int rc; static struct sis_chipset sis_chipsets[] = { - + { 0x0968, &sis_info133 }, { 0x0966, &sis_info133 }, { 0x0965, &sis_info133 }, @@ -891,19 +774,19 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 0x0, &sis_info100 }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - /* We have to find the bridge first */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; - for (chipset = &sis_chipsets[0]; chipset->device; chipset++) { - host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL); + /* We have to find the bridge first */ + for (sets = &sis_chipsets[0]; sets->device; sets++) { + host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL); if (host != NULL) { - if (chipset->device == 0x630) { /* SIS630 */ - u8 host_rev; - pci_read_config_byte(host, PCI_REVISION_ID, &host_rev); - if (host_rev >= 0x30) /* 630 ET */ + chipset = sets; /* Match found */ + if (sets->device == 0x630) { /* SIS630 */ + if (host->revision >= 0x30) /* 630 ET */ chipset = &sis100_early; } break; @@ -911,7 +794,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) } /* Look for concealed bridges */ - if (host == NULL) { + if (chipset == NULL) { /* Second check */ u32 idemisc; u16 trueid; @@ -926,17 +809,20 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) switch(trueid) { case 0x5518: /* SIS 962/963 */ + dev_info(&pdev->dev, + "SiS 962/963 MuTIOL IDE UDMA133 controller\n"); chipset = &sis133; if ((idemisc & 0x40000000) == 0) { pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000); - printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n"); + dev_info(&pdev->dev, + "Switching to 5513 register mapping\n"); } break; case 0x0180: /* SIS 965/965L */ - chipset = &sis133; + chipset = &sis133; break; case 0x1180: /* SIS 966/966L */ - chipset = &sis133; + chipset = &sis133; break; } } @@ -947,7 +833,6 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) u16 trueid; u8 prefctl; u8 idecfg; - u8 sbrev; /* Try the second unmasking technique */ pci_read_config_byte(pdev, 0x4a, &idecfg); @@ -960,11 +845,10 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */ if (lpc_bridge == NULL) break; - pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev); pci_read_config_byte(pdev, 0x49, &prefctl); pci_dev_put(lpc_bridge); - if (sbrev == 0x10 && (prefctl & 0x80)) { + if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) { chipset = &sis133_early; break; } @@ -978,18 +862,35 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (chipset == NULL) return -ENODEV; - port = chipset->info; - port->private_data = chipset; + ppi[0] = chipset->info; sis_fixup(pdev, chipset); - port_info[0] = port_info[1] = port; - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0); +} + +#ifdef 
CONFIG_PM_SLEEP +static int sis_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + sis_fixup(pdev, host->private_data); + + ata_host_resume(host); + return 0; } +#endif static const struct pci_device_id sis_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5513), }, /* SiS 5513 */ - { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5518), }, /* SiS 5518 */ + { PCI_VDEVICE(SI, 0x5513), }, /* SiS 5513 */ + { PCI_VDEVICE(SI, 0x5518), }, /* SiS 5518 */ + { PCI_VDEVICE(SI, 0x1180), }, /* SiS 1180 */ + { } }; @@ -998,25 +899,16 @@ static struct pci_driver sis_pci_driver = { .id_table = sis_pci_tbl, .probe = sis_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = sis_reinit_one, +#endif }; -static int __init sis_init(void) -{ - return pci_register_driver(&sis_pci_driver); -} - -static void __exit sis_exit(void) -{ - pci_unregister_driver(&sis_pci_driver); -} - - -module_init(sis_init); -module_exit(sis_exit); +module_pci_driver(sis_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sis_pci_tbl); MODULE_VERSION(DRV_VERSION); - diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 5b762acc568..4935f61f629 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -1,25 +1,31 @@ /* * pata_sl82c105.c - SL82C105 PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * (C) 2011 Bartlomiej Zolnierkiewicz * * Based in part on linux/drivers/ide/pci/sl82c105.c * SL82C105/Winbond 553 IDE driver * * and in part on the documentation and errata sheet + * + * + * Note: The controller like many controllers has shared timings for + * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back + * in the dma_stop function. Thus we actually don't need a set_dmamode + * method as the PIO method is always called and will set the right PIO + * timing parameters. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_sl82c105" -#define DRV_VERSION "0.2.3" +#define DRV_VERSION "0.3.3" enum { /* @@ -36,29 +42,24 @@ enum { /** * sl82c105_pre_reset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int sl82c105_pre_reset(struct ata_port *ap) +static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits sl82c105_enable_bits[] = { { 0x40, 1, 0x01, 0x01 }, { 0x40, 1, 0x10, 0x10 } }; + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} - - -static void sl82c105_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset); + return ata_sff_prereset(link, deadline); } @@ -126,33 +127,6 @@ static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *a } /** - * sl82c105_set_dmamode - set initial DMA mode data - * @ap: ATA interface - * @adev: ATA device - * - * Called to do the DMA mode setup. 
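 *
 * (Note that the whole ->set_dmamode method is being deleted here, not
 *  replaced. The timing registers are shared between PIO and DMA, so
 *  the conversion flips clocks around each transfer instead, as the
 *  bmdma hunks below show:
 *      sl82c105_reset_engine(ap);
 *      sl82c105_configure_dmamode(ap, qc->dev);    in bmdma_start
 *      ...
 *      sl82c105_set_piomode(ap, qc->dev);          in bmdma_stop
 *  leaving PIO timings as the steady state between transfers.)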
This replaces the PIO timings - * for the device in question. Set appropriate PIO timings not DMA - * timings at this point. - */ - -static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev) -{ - switch(adev->dma_mode) { - case XFER_MW_DMA_0: - sl82c105_configure_piomode(ap, adev, 1); - break; - case XFER_MW_DMA_1: - sl82c105_configure_piomode(ap, adev, 3); - break; - case XFER_MW_DMA_2: - sl82c105_configure_piomode(ap, adev, 3); - break; - default: - BUG(); - } -} - -/** * sl82c105_reset_engine - Reset the DMA engine * @ap: ATA interface * @@ -187,7 +161,9 @@ static void sl82c105_bmdma_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + udelay(100); sl82c105_reset_engine(ap); + udelay(100); /* Set the clocks for DMA */ sl82c105_configure_dmamode(ap, qc->dev); @@ -216,60 +192,64 @@ static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc) ata_bmdma_stop(qc); sl82c105_reset_engine(ap); + udelay(100); /* This will redo the initial setup of the DMA device to matching PIO timings */ - sl82c105_set_dmamode(ap, qc->dev); + sl82c105_set_piomode(ap, qc->dev); } -static struct scsi_host_template sl82c105_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, -}; - -static struct ata_port_operations sl82c105_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = sl82c105_set_piomode, - .set_dmamode = sl82c105_set_dmamode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, +/** + * sl82c105_qc_defer - implement serialization + * @qc: command + * + * We must issue one command per host not per channel because + * of the reset bug. + * + * Q: is the scsi host lock sufficient ? + */ - .error_handler = sl82c105_error_handler, +static int sl82c105_qc_defer(struct ata_queued_cmd *qc) +{ + struct ata_host *host = qc->ap->host; + struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; + int rc; + + /* First apply the usual rules */ + rc = ata_std_qc_defer(qc); + if (rc != 0) + return rc; + + /* Now apply serialization rules. Only allow a command if the + other channel state machine is idle */ + if (alt && alt->qc_active) + return ATA_DEFER_PORT; + return 0; +} - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = sl82c105_bmdma_start, - .bmdma_stop = sl82c105_bmdma_stop, - .bmdma_status = ata_bmdma_status, +static bool sl82c105_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u32 val, mask = ap->port_no ? 
CTRL_IDE_IRQB : CTRL_IDE_IRQA; - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, + pci_read_config_dword(pdev, 0x40, &val); - .data_xfer = ata_pio_data_xfer, + return val & mask; +} - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +static struct scsi_host_template sl82c105_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct ata_port_operations sl82c105_port_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_defer = sl82c105_qc_defer, + .bmdma_start = sl82c105_bmdma_start, + .bmdma_stop = sl82c105_bmdma_stop, + .cable_detect = ata_cable_40wire, + .set_piomode = sl82c105_set_piomode, + .prereset = sl82c105_pre_reset, + .sff_irq_check = sl82c105_sff_irq_check, }; /** @@ -284,7 +264,6 @@ static struct ata_port_operations sl82c105_port_ops = { static int sl82c105_bridge_revision(struct pci_dev *pdev) { struct pci_dev *bridge; - u8 rev; /* * The bridge should be part of the same device, but function 0. @@ -306,80 +285,96 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev) /* * We need to find function 0's revision, not function 1 */ - pci_read_config_byte(bridge, PCI_REVISION_ID, &rev); - pci_dev_put(bridge); - return rev; + return bridge->revision; } +static void sl82c105_fixup(struct pci_dev *pdev) +{ + u32 val; + + pci_read_config_dword(pdev, 0x40, &val); + val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; + pci_write_config_dword(pdev, 0x40, val); +} static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info_dma = { - .sht = &sl82c105_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info_dma = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &sl82c105_port_ops }; - static struct ata_port_info info_early = { - .sht = &sl82c105_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, + static const struct ata_port_info info_early = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, .port_ops = &sl82c105_port_ops }; - static struct ata_port_info *port_info[2] = { &info_early, &info_early }; - u32 val; + /* for now use only the first port */ + const struct ata_port_info *ppi[] = { &info_early, + NULL }; int rev; + int rc; + + rc = pcim_enable_device(dev); + if (rc) + return rc; rev = sl82c105_bridge_revision(dev); if (rev == -1) - dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n"); + dev_warn(&dev->dev, + "pata_sl82c105: Unable to find bridge, disabling DMA\n"); else if (rev <= 5) - dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n"); - else { - port_info[0] = &info_dma; - port_info[1] = &info_dma; - } + dev_warn(&dev->dev, + "pata_sl82c105: Early bridge revision, no DMA available\n"); + else + ppi[0] = &info_dma; - pci_read_config_dword(dev, 0x40, &val); - val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; - pci_write_config_dword(dev, 0x40, val); + sl82c105_fixup(dev); + + return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int sl82c105_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + sl82c105_fixup(pdev); - return ata_pci_init_one(dev, port_info, 1); /* For now */ + ata_host_resume(host); + return 0; } +#endif -static struct 
pci_device_id sl82c105[] = { - { PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), }, - { 0, }, +static const struct pci_device_id sl82c105[] = { + { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), }, + + { }, }; static struct pci_driver sl82c105_pci_driver = { .name = DRV_NAME, .id_table = sl82c105, .probe = sl82c105_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = sl82c105_reinit_one, +#endif }; -static int __init sl82c105_init(void) -{ - return pci_register_driver(&sl82c105_pci_driver); -} - - -static void __exit sl82c105_exit(void) -{ - pci_unregister_driver(&sl82c105_pci_driver); -} - +module_pci_driver(sl82c105_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Sl82c105"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sl82c105); MODULE_VERSION(DRV_VERSION); - -module_init(sl82c105_init); -module_exit(sl82c105_exit); diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index a954ed93a40..d9364af8eb6 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -1,7 +1,7 @@ /* * pata_triflex.c - Compaq PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox <alan@redhat.com> + * Alan Cox <alan@lxorguk.ukuu.org.uk> * * based upon * @@ -30,49 +30,45 @@ * Loosely based on the piix & svwks drivers. * * Documentation: - * Not publically available. + * Not publicly available. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_triflex" -#define DRV_VERSION "0.2.5" +#define DRV_VERSION "0.2.8" /** * triflex_prereset - probe begin - * @ap: ATA port + * @link: ATA link + * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ -static int triflex_prereset(struct ata_port *ap) +static int triflex_prereset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits triflex_enable_bits[] = { { 0x80, 1, 0x01, 0x01 }, { 0x80, 1, 0x02, 0x02 } }; + struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) return -ENOENT; - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); -} + return ata_sff_prereset(link, deadline); +} -static void triflex_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, triflex_prereset, ata_std_softreset, NULL, ata_std_postreset); -} /** * triflex_load_timing - timing configuration @@ -178,105 +174,75 @@ static void triflex_bmdma_stop(struct ata_queued_cmd *qc) } static struct scsi_host_template triflex_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations triflex_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = triflex_set_piomode, - .mode_filter = ata_pci_default_filter, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status 
= ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = triflex_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .bmdma_setup = ata_bmdma_setup, + .inherits = &ata_bmdma_port_ops, .bmdma_start = triflex_bmdma_start, .bmdma_stop = triflex_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - - .data_xfer = ata_pio_data_xfer, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop + .cable_detect = ata_cable_40wire, + .set_piomode = triflex_set_piomode, + .prereset = triflex_prereset, }; static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - static struct ata_port_info info = { - .sht = &triflex_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &triflex_port_ops }; - static struct ata_port_info *port_info[2] = { &info, &info }; - static int printed_version; + const struct ata_port_info *ppi[] = { &info, NULL }; - if (!printed_version++) - dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&dev->dev, DRV_VERSION); - return ata_pci_init_one(dev, port_info, 2); + return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); } static const struct pci_device_id triflex[] = { - { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0, }, -}; + { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), }, -static struct pci_driver triflex_pci_driver = { - .name = DRV_NAME, - .id_table = triflex, - .probe = triflex_init_one, - .remove = ata_pci_remove_one + { }, }; -static int __init triflex_init(void) +#ifdef CONFIG_PM_SLEEP +static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { - return pci_register_driver(&triflex_pci_driver); -} + struct ata_host *host = pci_get_drvdata(pdev); + int rc = 0; + rc = ata_host_suspend(host, mesg); + if (rc) + return rc; -static void __exit triflex_exit(void) -{ - pci_unregister_driver(&triflex_pci_driver); + /* + * We must not disable or powerdown the device. + * APM bios refuses to suspend if IDE is not accessible. 
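 *
 * (For contrast, a sketch of what is being skipped -- the stock libata
 *  PCI suspend helper of this era does, roughly:
 *      pci_save_state(pdev);
 *      pci_disable_device(pdev);
 *      pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *  and the two power-down steps are exactly what the APM BIOS cannot
 *  tolerate; helper details recalled from the API, not quoted.)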
+ */ + pci_save_state(pdev); + + return 0; } +#endif + +static struct pci_driver triflex_pci_driver = { + .name = DRV_NAME, + .id_table = triflex, + .probe = triflex_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = triflex_ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif +}; + +module_pci_driver(triflex_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Compaq Triflex"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, triflex); MODULE_VERSION(DRV_VERSION); - -module_init(triflex_init); -module_exit(triflex_exit); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 7b5dd2343b9..1ca6bcab369 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -1,7 +1,6 @@ /* * pata_via.c - VIA PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox <alan@redhat.com> * * Documentation * Most chipset documentation available under NDA only @@ -23,6 +22,9 @@ * VIA VT8233c - UDMA100 * VIA VT8235 - UDMA133 * VIA VT8237 - UDMA133 + * VIA VT8237A - UDMA133 + * VIA VT8237S - UDMA133 + * VIA VT8251 - UDMA133 * * Most registers remain compatible across chips. Others start reserved * and acquire sensible semantics if set to 1 (eg cable detect). A few @@ -53,34 +55,29 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/gfp.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include <linux/dmi.h> #define DRV_NAME "pata_via" -#define DRV_VERSION "0.1.14" +#define DRV_VERSION "0.3.4" -/* - * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx - * driver. - */ +enum { + VIA_BAD_PREQ = 0x01, /* Crashes if PREQ# till DDACK# set */ + VIA_BAD_CLK66 = 0x02, /* 66 MHz clock doesn't work correctly */ + VIA_SET_FIFO = 0x04, /* Needs to have FIFO split set */ + VIA_NO_UNMASK = 0x08, /* Doesn't work with IRQ unmasking on */ + VIA_BAD_ID = 0x10, /* Has wrong vendor ID (0x1107) */ + VIA_BAD_AST = 0x20, /* Don't touch Address Setup Timing */ + VIA_NO_ENABLES = 0x40, /* Has no enablebits */ + VIA_SATA_PATA = 0x80, /* SATA/PATA combined configuration */ +}; enum { - VIA_UDMA = 0x007, - VIA_UDMA_NONE = 0x000, - VIA_UDMA_33 = 0x001, - VIA_UDMA_66 = 0x002, - VIA_UDMA_100 = 0x003, - VIA_UDMA_133 = 0x004, - VIA_BAD_PREQ = 0x010, /* Crashes if PREQ# till DDACK# set */ - VIA_BAD_CLK66 = 0x020, /* 66 MHz clock doesn't work correctly */ - VIA_SET_FIFO = 0x040, /* Needs to have FIFO split set */ - VIA_NO_UNMASK = 0x080, /* Doesn't work with IRQ unmasking on */ - VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */ - VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */ - VIA_NO_ENABLES = 0x400, /* Has no enablebits */ + VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller) */ }; /* @@ -92,32 +89,82 @@ static const struct via_isa_bridge { u16 id; u8 rev_min; u8 rev_max; - u16 flags; + u8 udma_mask; + u8 flags; } via_isa_bridges[] = { - { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, - { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES}, - { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, - { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, - { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, - { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, - { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, 
VIA_UDMA_100 }, - { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 }, - { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 }, - { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 }, - { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 }, - { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 }, - { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 }, - { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 }, - { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO }, - { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ }, - { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO }, - { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO }, - { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO }, - { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK }, - { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, + { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES }, + { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES }, + { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, }, + { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, }, + { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, }, + { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, }, + { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, }, + { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 }, + { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, }, + { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 }, + { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO }, + { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ }, + { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO }, + { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO }, + { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO }, + { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK }, + { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, + { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { NULL } }; +static const struct dmi_system_id no_atapi_dma_dmi_table[] = { + { + 
.ident = "AVERATEC 3200", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"), + DMI_MATCH(DMI_BOARD_NAME, "3200"), + }, + }, + { } +}; + +struct via_port { + u8 cached_device; +}; + +/* + * Cable special cases + */ + +static const struct dmi_system_id cable_dmi_table[] = { + { + .ident = "Acer Ferrari 3400", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."), + DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"), + }, + }, + { } +}; + +static int via_cable_override(struct pci_dev *pdev) +{ + /* Systems by DMI */ + if (dmi_check_system(cable_dmi_table)) + return 1; + /* Arima W730-K8/Targa Visionary 811/... */ + if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032) + return 1; + return 0; +} + + /** * via_cable_detect - cable detection * @ap: ATA port @@ -131,20 +178,38 @@ static const struct via_isa_bridge { */ static int via_cable_detect(struct ata_port *ap) { + const struct via_isa_bridge *config = ap->host->private_data; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 ata66; + if (via_cable_override(pdev)) + return ATA_CBL_PATA40_SHORT; + + if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0) + return ATA_CBL_SATA; + + /* Early chips are 40 wire */ + if (config->udma_mask < ATA_UDMA4) + return ATA_CBL_PATA40; + /* UDMA 66 chips have only drive side logic */ + else if (config->udma_mask < ATA_UDMA5) + return ATA_CBL_PATA_UNK; + /* UDMA 100 or later */ pci_read_config_dword(pdev, 0x50, &ata66); /* Check both the drive cable reporting bits, we might not have two drives */ if (ata66 & (0x10100000 >> (16 * ap->port_no))) return ATA_CBL_PATA80; - else - return ATA_CBL_PATA40; + /* Check with ACPI so we can spot BIOS reported SATA bridges */ + if (ata_acpi_init_gtm(ap) && + ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) + return ATA_CBL_PATA80; + return ATA_CBL_PATA40; } -static int via_pre_reset(struct ata_port *ap) +static int via_pre_reset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; const struct via_isa_bridge *config = ap->host->private_data; if (!(config->flags & VIA_NO_ENABLES)) { @@ -152,39 +217,20 @@ static int via_pre_reset(struct ata_port *ap) { 0x40, 1, 0x02, 0x02 }, { 0x40, 1, 0x01, 0x01 } }; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) return -ENOENT; } - if ((config->flags & VIA_UDMA) >= VIA_UDMA_66) - ap->cbl = via_cable_detect(ap); - else - ap->cbl = ATA_CBL_PATA40; - return ata_std_prereset(ap); + return ata_sff_prereset(link, deadline); } /** - * via_error_handler - reset for VIA chips - * @ap: ATA port - * - * Handle the reset callback for the later chips with cable detect - */ - -static void via_error_handler(struct ata_port *ap) -{ - ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset); -} - -/** - * via_do_set_mode - set initial PIO mode data + * via_do_set_mode - set transfer mode data * @ap: ATA interface * @adev: ATA device * @mode: ATA mode being programmed - * @tdiv: Clocks per PCI clock * @set_ast: Set to program address setup * @udma_type: UDMA mode/format of registers * @@ -195,17 +241,26 @@ static void via_error_handler(struct ata_port *ap) * on the two channels. 
*/ -static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type) +static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, + int mode, int set_ast, int udma_type) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_device *peer = ata_dev_pair(adev); struct ata_timing t, p; - static int via_clock = 33333; /* Bus clock in kHZ - ought to be tunable one day */ + static int via_clock = 33333; /* Bus clock in kHZ */ unsigned long T = 1000000000 / via_clock; - unsigned long UT = T/tdiv; + unsigned long UT = T; int ut; int offset = 3 - (2*ap->port_no) - adev->devno; + switch (udma_type) { + case ATA_UDMA4: + UT = T / 2; break; + case ATA_UDMA5: + UT = T / 3; break; + case ATA_UDMA6: + UT = T / 4; break; + } /* Calculate the timing values we require */ ata_timing_compute(adev, mode, &t, T, UT); @@ -225,152 +280,258 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo pci_read_config_byte(pdev, 0x4C, &setup); setup &= ~(3 << shift); - setup |= FIT(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */ + setup |= (clamp_val(t.setup, 1, 4) - 1) << shift; pci_write_config_byte(pdev, 0x4C, setup); } /* Load the PIO mode bits */ pci_write_config_byte(pdev, 0x4F - ap->port_no, - ((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1)); + ((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1)); pci_write_config_byte(pdev, 0x48 + offset, - ((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1)); + ((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1)); /* Load the UDMA bits according to type */ - switch(udma_type) { - default: - /* BUG() ? */ - /* fall through */ - case 33: - ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03; - break; - case 66: - ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f; - break; - case 100: - ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07; - break; - case 133: - ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07; - break; + switch (udma_type) { + case ATA_UDMA2: + default: + ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03; + break; + case ATA_UDMA4: + ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f; + break; + case ATA_UDMA5: + ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07; + break; + case ATA_UDMA6: + ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07; + break; } + /* Set UDMA unless device is not UDMA capable */ - if (udma_type) - pci_write_config_byte(pdev, 0x50 + offset, ut); + if (udma_type) { + u8 udma_etc; + + pci_read_config_byte(pdev, 0x50 + offset, &udma_etc); + + /* clear transfer mode bit */ + udma_etc &= ~0x20; + + if (t.udma) { + /* preserve 80-wire cable detection bit */ + udma_etc &= 0x10; + udma_etc |= ut; + } + + pci_write_config_byte(pdev, 0x50 + offset, udma_etc); + } } static void via_set_piomode(struct ata_port *ap, struct ata_device *adev) { const struct via_isa_bridge *config = ap->host->private_data; int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1; - int mode = config->flags & VIA_UDMA; - static u8 tclock[5] = { 1, 1, 2, 3, 4 }; - static u8 udma[5] = { 0, 33, 66, 100, 133 }; - via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]); + via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask); } static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) { const struct via_isa_bridge *config = ap->host->private_data; int set_ast = (config->flags & VIA_BAD_AST) ? 
0 : 1; - int mode = config->flags & VIA_UDMA; - static u8 tclock[5] = { 1, 1, 2, 3, 4 }; - static u8 udma[5] = { 0, 33, 66, 100, 133 }; - via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]); + via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask); } -static struct scsi_host_template via_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .max_sectors = ATA_MAX_SECTORS, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .bios_param = ata_std_bios_param, -}; +/** + * via_mode_filter - filter buggy device/mode pairs + * @dev: ATA device + * @mask: Mode bitmask + * + * We need to apply some minimal filtering for old controllers and at least + * one breed of Transcend SSD. Return the updated mask. + */ -static struct ata_port_operations via_port_ops = { - .port_disable = ata_port_disable, - .set_piomode = via_set_piomode, - .set_dmamode = via_set_dmamode, - .mode_filter = ata_pci_default_filter, +static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) +{ + struct ata_host *host = dev->link->ap->host; + const struct via_isa_bridge *config = host->private_data; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + + if (config->id == PCI_DEVICE_ID_VIA_82C586_0) { + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + if (strcmp(model_num, "TS64GSSD25-M") == 0) { + ata_dev_warn(dev, + "disabling UDMA mode due to reported lockups with this device\n"); + mask &= ~ ATA_MASK_UDMA; + } + } + + if (dev->class == ATA_DEV_ATAPI && + dmi_check_system(no_atapi_dma_dmi_table)) { + ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n"); + mask &= ATA_MASK_PIO; + } + + return mask; +} + +/** + * via_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. 
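 *
 * (In essence, because toggling IEN through tf->ctl clobbers the
 *  device register on these chipsets, the via_port private data caches
 *  the last device value so the newctl path can replay it:
 *      iowrite8(tf->ctl, ioaddr->ctl_addr);
 *      iowrite8(vp->cached_device, ioaddr->device_addr);
 *  with ata_wait_idle() settling the port in between, as the code
 *  below does.)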
+ * + * Note: This is to fix the internal bug of via chipsets, which + * will reset the device register after changing the IEN bit on + * ctl register + */ +static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + struct via_port *vp = ap->private_data; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + int newctl = 0; + + if (tf->ctl != ap->last_ctl) { + iowrite8(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + newctl = 1; + } - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + if (tf->flags & ATA_TFLAG_DEVICE) { + iowrite8(tf->device, ioaddr->device_addr); + vp->cached_device = tf->device; + } else if (newctl) + iowrite8(vp->cached_device, ioaddr->device_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + WARN_ON_ONCE(!ioaddr->ctl_addr); + iowrite8(tf->hob_feature, ioaddr->feature_addr); + iowrite8(tf->hob_nsect, ioaddr->nsect_addr); + iowrite8(tf->hob_lbal, ioaddr->lbal_addr); + iowrite8(tf->hob_lbam, ioaddr->lbam_addr); + iowrite8(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = via_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, + if (is_addr) { + iowrite8(tf->feature, ioaddr->feature_addr); + iowrite8(tf->nsect, ioaddr->nsect_addr); + iowrite8(tf->lbal, ioaddr->lbal_addr); + iowrite8(tf->lbam, ioaddr->lbam_addr); + iowrite8(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, + ata_wait_idle(ap); +} - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +static int via_port_start(struct ata_port *ap) +{ + struct via_port *vp; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); - .data_xfer = ata_pio_data_xfer, + int ret = ata_bmdma_port_start(ap); + if (ret < 0) + return ret; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL); + if (vp == NULL) + return -ENOMEM; + ap->private_data = vp; + return 0; +} - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop +static struct scsi_host_template via_sht = { + ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations via_port_ops_noirq = { - .port_disable = ata_port_disable, +static struct ata_port_operations via_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = via_cable_detect, .set_piomode = via_set_piomode, .set_dmamode = via_set_dmamode, - .mode_filter = ata_pci_default_filter, + .prereset = via_pre_reset, + .sff_tf_load = via_tf_load, + .port_start = via_port_start, + .mode_filter = via_mode_filter, +}; + +static struct ata_port_operations via_port_ops_noirq = { + .inherits = &via_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, +}; + +/** + * via_config_fifo - set up the FIFO + * @pdev: PCI device + * @flags: configuration flags + * + * Set the FIFO properties for this device if necessary. 
Used both on + * set up and on the resume path + */ + +static void via_config_fifo(struct pci_dev *pdev, unsigned int flags) +{ + u8 enable; - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, + /* 0x40 low bits indicate enabled channels */ + pci_read_config_byte(pdev, 0x40 , &enable); + enable &= 3; - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = via_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, + if (flags & VIA_SET_FIFO) { + static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; + u8 fifo; - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, + pci_read_config_byte(pdev, 0x43, &fifo); - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, + /* Clear PREQ# until DDACK# for errata */ + if (flags & VIA_BAD_PREQ) + fifo &= 0x7F; + else + fifo &= 0x9f; + /* Turn on FIFO for enabled channels */ + fifo |= fifo_setting[enable]; + pci_write_config_byte(pdev, 0x43, fifo); + } +} - .data_xfer = ata_pio_data_xfer_noirq, +static void via_fixup(struct pci_dev *pdev, const struct via_isa_bridge *config) +{ + u32 timing; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, + /* Initialise the FIFO for the enabled channels. */ + via_config_fifo(pdev, config->flags); - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop -}; + if (config->udma_mask == ATA_UDMA4) { + /* The 66 MHz devices require we enable the clock */ + pci_read_config_dword(pdev, 0x50, &timing); + timing |= 0x80008; + pci_write_config_dword(pdev, 0x50, timing); + } + if (config->flags & VIA_BAD_CLK66) { + /* Disable the 66MHz clock on problem devices */ + pci_read_config_dword(pdev, 0x50, &timing); + timing &= ~0x80008; + pci_write_config_dword(pdev, 0x50, timing); + } +} /** * via_init_one - discovery callback - * @pdev: PCI device ID + * @pdev: PCI device * @id: PCI table info * * A VIA IDE interface has been discovered.
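 *
 * (Rationale for via_fixup() above: it bundles the one-time setup so
 *  the resume path further down can replay it -- the FIFO split via
 *  via_config_fifo(), plus the 66 MHz clock bits 0x80008 in PCI config
 *  dword 0x50, set for true UDMA66 parts and cleared again for
 *  VIA_BAD_CLK66 ones.)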
Figure out what revision @@ -380,186 +541,179 @@ static struct ata_port_operations via_port_ops_noirq = { static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { /* Early VIA without UDMA support */ - static struct ata_port_info via_mwdma_info = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info via_mwdma_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &via_port_ops }; /* Ditto with IRQ masking required */ - static struct ata_port_info via_mwdma_info_borked = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, + static const struct ata_port_info via_mwdma_info_borked = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, .port_ops = &via_port_ops_noirq, }; /* VIA UDMA 33 devices (and borked 66) */ - static struct ata_port_info via_udma33_info = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7, + static const struct ata_port_info via_udma33_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA2, .port_ops = &via_port_ops }; /* VIA UDMA 66 devices */ - static struct ata_port_info via_udma66_info = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x1f, + static const struct ata_port_info via_udma66_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA4, .port_ops = &via_port_ops }; /* VIA UDMA 100 devices */ - static struct ata_port_info via_udma100_info = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x3f, + static const struct ata_port_info via_udma100_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &via_port_ops }; /* UDMA133 with bad AST (All current 133) */ - static struct ata_port_info via_udma133_info = { - .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, /* FIXME: should check north bridge */ + static const struct ata_port_info via_udma133_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */ .port_ops = &via_port_ops }; - struct ata_port_info *port_info[2], *type; - struct pci_dev *isa = NULL; + const struct ata_port_info *ppi[] = { NULL, NULL }; + struct pci_dev *isa; const struct via_isa_bridge *config; - static int printed_version; - u8 t; u8 enable; - u32 timing; + unsigned long flags = id->driver_data; + int rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (flags & VIA_IDFLAG_SINGLE) + ppi[1] = &ata_dummy_port_info; /* To find out how the IDE will behave and what features we actually have to look at the bridge not the IDE controller */ - for (config = via_isa_bridges; config->id; config++) + for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON; + config++) if ((isa = pci_get_device(PCI_VENDOR_ID_VIA + !!(config->flags & 
VIA_BAD_ID), config->id, NULL))) { + u8 rev = isa->revision; + pci_dev_put(isa); - pci_read_config_byte(isa, PCI_REVISION_ID, &t); - if (t >= config->rev_min && - t <= config->rev_max) + if ((id->device == 0x0415 || id->device == 0x3164) && + (config->id != id->device)) + continue; + + if (rev >= config->rev_min && rev <= config->rev_max) break; - pci_dev_put(isa); } - if (!config->id) { - printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n"); - return -ENODEV; + if (!(config->flags & VIA_NO_ENABLES)) { + /* 0x40 low bits indicate enabled channels */ + pci_read_config_byte(pdev, 0x40 , &enable); + enable &= 3; + if (enable == 0) + return -ENODEV; } - pci_dev_put(isa); - /* 0x40 low bits indicate enabled channels */ - pci_read_config_byte(pdev, 0x40 , &enable); - enable &= 3; - if (enable == 0) { + /* Clock set up */ + switch (config->udma_mask) { + case 0x00: + if (config->flags & VIA_NO_UNMASK) + ppi[0] = &via_mwdma_info_borked; + else + ppi[0] = &via_mwdma_info; + break; + case ATA_UDMA2: + ppi[0] = &via_udma33_info; + break; + case ATA_UDMA4: + ppi[0] = &via_udma66_info; + break; + case ATA_UDMA5: + ppi[0] = &via_udma100_info; + break; + case ATA_UDMA6: + ppi[0] = &via_udma133_info; + break; + default: + WARN_ON(1); return -ENODEV; - } + } - /* Initialise the FIFO for the enabled channels. */ - if (config->flags & VIA_SET_FIFO) { - u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; - u8 fifo; + via_fixup(pdev, config); - pci_read_config_byte(pdev, 0x43, &fifo); + /* We have established the device type, now fire it up */ + return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); +} - /* Clear PREQ# until DDACK# for errata */ - if (config->flags & VIA_BAD_PREQ) - fifo &= 0x7F; - else - fifo &= 0x9f; - /* Turn on FIFO for enabled channels */ - fifo |= fifo_setting[enable]; - pci_write_config_byte(pdev, 0x43, fifo); - } - /* Clock set up */ - switch(config->flags & VIA_UDMA) { - case VIA_UDMA_NONE: - if (config->flags & VIA_NO_UNMASK) - type = &via_mwdma_info_borked; - else - type = &via_mwdma_info; - break; - case VIA_UDMA_33: - type = &via_udma33_info; - break; - case VIA_UDMA_66: - type = &via_udma66_info; - /* The 66 MHz devices require we enable the clock */ - pci_read_config_dword(pdev, 0x50, &timing); - timing |= 0x80008; - pci_write_config_dword(pdev, 0x50, timing); - break; - case VIA_UDMA_100: - type = &via_udma100_info; - break; - case VIA_UDMA_133: - type = &via_udma133_info; - break; - default: - WARN_ON(1); - return -ENODEV; - } +#ifdef CONFIG_PM_SLEEP +/** + * via_reinit_one - reinit after resume + * @pdev; PCI device + * + * Called when the VIA PATA device is resumed. We must then + * reconfigure the fifo and other setup we may have altered. In + * addition the kernel needs to have the resume methods on PCI + * quirk supported. 
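 *
 * (This is the third appearance of the same resume shape in this
 *  series, after pata_sis and pata_sl82c105:
 *      rc = ata_pci_device_do_resume(pdev);
 *      if (rc)
 *              return rc;
 *      ...re-apply chipset-specific fixups...
 *      ata_host_resume(host);
 *  only the fixup step in the middle differs per driver.)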
+ */ - if (config->flags & VIA_BAD_CLK66) { - /* Disable the 66MHz clock on problem devices */ - pci_read_config_dword(pdev, 0x50, &timing); - timing &= ~0x80008; - pci_write_config_dword(pdev, 0x50, timing); - } +static int via_reinit_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; - /* We have established the device type, now fire it up */ - type->private_data = (void *)config; + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + via_fixup(pdev, host->private_data); - port_info[0] = port_info[1] = type; - return ata_pci_init_one(pdev, port_info, 2); + ata_host_resume(host); + return 0; } +#endif static const struct pci_device_id via[] = { - { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1), }, - { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1), }, - { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410), }, - { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), }, - { 0, }, + { PCI_VDEVICE(VIA, 0x0415), }, + { PCI_VDEVICE(VIA, 0x0571), }, + { PCI_VDEVICE(VIA, 0x0581), }, + { PCI_VDEVICE(VIA, 0x1571), }, + { PCI_VDEVICE(VIA, 0x3164), }, + { PCI_VDEVICE(VIA, 0x5324), }, + { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, + { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE }, + + { }, }; static struct pci_driver via_pci_driver = { - .name = DRV_NAME, + .name = DRV_NAME, .id_table = via, .probe = via_init_one, - .remove = ata_pci_remove_one + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = via_reinit_one, +#endif }; -static int __init via_init(void) -{ - return pci_register_driver(&via_pci_driver); -} - - -static void __exit via_exit(void) -{ - pci_unregister_driver(&via_pci_driver); -} - +module_pci_driver(via_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for VIA PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, via); MODULE_VERSION(DRV_VERSION); - -module_init(via_init); -module_exit(via_exit); diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 0e23ecb77bc..f10631beffa 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -1,7 +1,7 @@ /* * pdc_adma.c - Pacific Digital Corporation ADMA * - * Maintained by: Mark Lord <mlord@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * * Copyright 2005 Mark Lord * @@ -34,27 +34,31 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/sched.h> #include <linux/device.h> #include <scsi/scsi_host.h> -#include <asm/io.h> #include <linux/libata.h> #define DRV_NAME "pdc_adma" -#define DRV_VERSION "0.04" +#define DRV_VERSION "1.0" /* macro to calculate base address for ATA regs */ -#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) +#define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40)) /* macro to calculate base address for ADMA regs */ -#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) +#define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20)) + +/* macro to obtain addresses from ata_port */ +#define ADMA_PORT_REGS(ap) \ + ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no) enum { + ADMA_MMIO_BAR = 4, + ADMA_PORTS = 2, ADMA_CPB_BYTES = 40, ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, @@ -88,6 +92,8 @@ enum { /* CPB bits */ cDONE = (1 << 0), + cATERR = (1 << 3), + cVLD = (1 << 0), cDAT = (1 << 2), cIEN = (1 << 3), @@ -122,78 +128,52 
@@ struct adma_port_priv { adma_state_t state; }; -static int adma_ata_init_one (struct pci_dev *pdev, +static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -static irqreturn_t adma_intr (int irq, void *dev_instance, - struct pt_regs *regs); static int adma_port_start(struct ata_port *ap); -static void adma_host_stop(struct ata_host *host); static void adma_port_stop(struct ata_port *ap); -static void adma_phy_reset(struct ata_port *ap); static void adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); static int adma_check_atapi_dma(struct ata_queued_cmd *qc); -static void adma_bmdma_stop(struct ata_queued_cmd *qc); -static u8 adma_bmdma_status(struct ata_port *ap); -static void adma_irq_clear(struct ata_port *ap); -static void adma_eng_timeout(struct ata_port *ap); +static void adma_freeze(struct ata_port *ap); +static void adma_thaw(struct ata_port *ap); +static int adma_prereset(struct ata_link *link, unsigned long deadline); static struct scsi_host_template adma_ata_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, + ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ENABLE_CLUSTERING, - .proc_name = DRV_NAME, .dma_boundary = ADMA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations adma_ata_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, +static struct ata_port_operations adma_ata_ops = { + .inherits = &ata_sff_port_ops, + + .lost_interrupt = ATA_OP_NULL, + .check_atapi_dma = adma_check_atapi_dma, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .phy_reset = adma_phy_reset, .qc_prep = adma_qc_prep, .qc_issue = adma_qc_issue, - .eng_timeout = adma_eng_timeout, - .data_xfer = ata_mmio_data_xfer, - .irq_handler = adma_intr, - .irq_clear = adma_irq_clear, + + .freeze = adma_freeze, + .thaw = adma_thaw, + .prereset = adma_prereset, + .port_start = adma_port_start, .port_stop = adma_port_stop, - .host_stop = adma_host_stop, - .bmdma_stop = adma_bmdma_stop, - .bmdma_status = adma_bmdma_status, }; static struct ata_port_info adma_port_info[] = { /* board_1841_idx */ { - .sht = &adma_ata_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | - ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | - ATA_FLAG_PIO_POLLING, - .pio_mask = 0x10, /* pio4 */ - .udma_mask = 0x1f, /* udma0-4 */ + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING, + .pio_mask = ATA_PIO4_ONLY, + .udma_mask = ATA_UDMA4, .port_ops = &adma_ata_ops, }, }; static const struct pci_device_id adma_ata_pci_tbl[] = { - { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_1841_idx }, + { PCI_VDEVICE(PDC, 0x1841), board_1841_idx }, { } /* terminate list */ }; @@ -210,23 +190,10 @@ static int adma_check_atapi_dma(struct ata_queued_cmd *qc) return 1; /* ATAPI DMA not yet supported */ } -static void adma_bmdma_stop(struct ata_queued_cmd *qc) +static void adma_reset_engine(struct ata_port *ap) { - /* nothing */ -} + void __iomem *chan = ADMA_PORT_REGS(ap); -static u8 adma_bmdma_status(struct ata_port *ap) -{ - return 0; -} - -static void adma_irq_clear(struct ata_port *ap) -{ - /* nothing */ -} - -static void 
adma_reset_engine(void __iomem *chan) -{ /* reset ADMA to idle state */ writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); udelay(2); @@ -237,15 +204,14 @@ static void adma_reset_engine(void __iomem *chan) static void adma_reinit_engine(struct ata_port *ap) { struct adma_port_priv *pp = ap->private_data; - void __iomem *mmio_base = ap->host->mmio_base; - void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); + void __iomem *chan = ADMA_PORT_REGS(ap); /* mask/clear ATA interrupts */ - writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr); - ata_check_status(ap); + writeb(ATA_NIEN, ap->ioaddr.ctl_addr); + ata_sff_check_status(ap); /* reset the ADMA engine */ - adma_reset_engine(chan); + adma_reset_engine(ap); /* set in-FIFO threshold to 0x100 */ writew(0x100, chan + ADMA_FIFO_IN); @@ -265,30 +231,42 @@ static void adma_reinit_engine(struct ata_port *ap) static inline void adma_enter_reg_mode(struct ata_port *ap) { - void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); + void __iomem *chan = ADMA_PORT_REGS(ap); writew(aPIOMD4, chan + ADMA_CONTROL); readb(chan + ADMA_STATUS); /* flush */ } -static void adma_phy_reset(struct ata_port *ap) +static void adma_freeze(struct ata_port *ap) { - struct adma_port_priv *pp = ap->private_data; + void __iomem *chan = ADMA_PORT_REGS(ap); + + /* mask/clear ATA interrupts */ + writeb(ATA_NIEN, ap->ioaddr.ctl_addr); + ata_sff_check_status(ap); - pp->state = adma_state_idle; + /* reset ADMA to idle state */ + writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); + udelay(2); + writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL); + udelay(2); +} + +static void adma_thaw(struct ata_port *ap) +{ adma_reinit_engine(ap); - ata_port_probe(ap); - ata_bus_reset(ap); } -static void adma_eng_timeout(struct ata_port *ap) +static int adma_prereset(struct ata_link *link, unsigned long deadline) { + struct ata_port *ap = link->ap; struct adma_port_priv *pp = ap->private_data; if (pp->state != adma_state_idle) /* healthy paranoia */ pp->state = adma_state_mmio; adma_reinit_engine(ap); - ata_eng_timeout(ap); + + return ata_sff_prereset(link, deadline); } static int adma_fill_sg(struct ata_queued_cmd *qc) @@ -296,11 +274,12 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) struct scatterlist *sg; struct ata_port *ap = qc->ap; struct adma_port_priv *pp = ap->private_data; - u8 *buf = pp->pkt; + u8 *buf = pp->pkt, *last_buf = NULL; int i = (2 + buf[3]) * 8; u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); + unsigned int si; - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { u32 addr; u32 len; @@ -312,20 +291,23 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) *(__le32 *)(buf + i) = cpu_to_le32(len); i += 4; - if (ata_sg_is_last(sg, qc)) - pFLAGS |= pEND; + last_buf = &buf[i]; buf[i++] = pFLAGS; buf[i++] = qc->dev->dma_mode & 0xf; buf[i++] = 0; /* pPKLW */ buf[i++] = 0; /* reserved */ - *(__le32 *)(buf + i) - = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); + *(__le32 *)(buf + i) = + (pFLAGS & pEND) ? 
0 : cpu_to_le32(pp->pkt_dma + i + 4); i += 4; VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, (unsigned long)addr, len); } + + if (likely(last_buf)) + *last_buf |= pEND; + return i; } @@ -339,10 +321,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) VPRINTK("ENTER\n"); adma_enter_reg_mode(qc->ap); - if (qc->tf.protocol != ATA_PROT_DMA) { - ata_qc_prep(qc); + if (qc->tf.protocol != ATA_PROT_DMA) return; - } buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ @@ -412,7 +392,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) static inline void adma_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); + void __iomem *chan = ADMA_PORT_REGS(ap); VPRINTK("ENTER, ap %p\n", ap); @@ -430,7 +410,7 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc) adma_packet_start(qc); return 0; - case ATA_PROT_ATAPI_DMA: + case ATAPI_PROT_DMA: BUG(); break; @@ -439,38 +419,54 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc) } pp->state = adma_state_mmio; - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } static inline unsigned int adma_intr_pkt(struct ata_host *host) { unsigned int handled = 0, port_no; - u8 __iomem *mmio_base = host->mmio_base; for (port_no = 0; port_no < host->n_ports; ++port_no) { struct ata_port *ap = host->ports[port_no]; struct adma_port_priv *pp; struct ata_queued_cmd *qc; - void __iomem *chan = ADMA_REGS(mmio_base, port_no); + void __iomem *chan = ADMA_PORT_REGS(ap); u8 status = readb(chan + ADMA_STATUS); if (status == 0) continue; handled = 1; adma_enter_reg_mode(ap); - if (ap->flags & ATA_FLAG_DISABLED) - continue; pp = ap->private_data; if (!pp || pp->state != adma_state_pkt) continue; - qc = ata_qc_from_tag(ap, ap->active_tag); + qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - if ((status & (aPERR | aPSD | aUIRQ))) + if (status & aPERR) + qc->err_mask |= AC_ERR_HOST_BUS; + else if ((status & (aPSD | aUIRQ))) qc->err_mask |= AC_ERR_OTHER; + + if (pp->pkt[0] & cATERR) + qc->err_mask |= AC_ERR_DEV; else if (pp->pkt[0] != cDONE) qc->err_mask |= AC_ERR_OTHER; - ata_qc_complete(qc); + if (!qc->err_mask) + ata_qc_complete(qc); + else { + struct ata_eh_info *ehi = &ap->link.eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, + "ADMA-status 0x%02X", status); + ata_ehi_push_desc(ehi, + "pkt[0] 0x%02X", pp->pkt[0]); + + if (qc->err_mask == AC_ERR_DEV) + ata_port_abort(ap); + else + ata_port_freeze(ap); + } } } return handled; @@ -481,35 +477,44 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { - struct ata_port *ap; - ap = host->ports[port_no]; - if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { - struct ata_queued_cmd *qc; - struct adma_port_priv *pp = ap->private_data; - if (!pp || pp->state != adma_state_mmio) + struct ata_port *ap = host->ports[port_no]; + struct adma_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; + + if (!pp || pp->state != adma_state_mmio) + continue; + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + + /* check main status, clearing INTRQ */ + u8 status = ata_sff_check_status(ap); + if ((status & ATA_BUSY)) continue; - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - - /* check main status, clearing INTRQ */ - u8 status = ata_check_status(ap); - if 
((status & ATA_BUSY)) - continue; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->id, qc->tf.protocol, status); - - /* complete taskfile transaction */ - pp->state = adma_state_idle; - qc->err_mask |= ac_err_mask(status); + DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", + ap->print_id, qc->tf.protocol, status); + + /* complete taskfile transaction */ + pp->state = adma_state_idle; + qc->err_mask |= ac_err_mask(status); + if (!qc->err_mask) ata_qc_complete(qc); - handled = 1; + else { + struct ata_eh_info *ehi = &ap->link.eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "status 0x%02X", status); + + if (qc->err_mask == AC_ERR_DEV) + ata_port_abort(ap); + else + ata_port_freeze(ap); } + handled = 1; } } return handled; } -static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) +static irqreturn_t adma_intr(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int handled = 0; @@ -525,7 +530,7 @@ static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) return IRQ_RETVAL(handled); } -static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base) +static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = port->data_addr = base + 0x000; @@ -546,195 +551,118 @@ static int adma_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct adma_port_priv *pp; - int rc; - rc = ata_port_start(ap); - if (rc) - return rc; adma_enter_reg_mode(ap); - rc = -ENOMEM; - pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) - goto err_out; - pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, - GFP_KERNEL); + return -ENOMEM; + pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, + GFP_KERNEL); if (!pp->pkt) - goto err_out_kfree; + return -ENOMEM; /* paranoia? 
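(the ADMA packet buffer must be 8-byte aligned for the engine; dmam_alloc_coherent normally guarantees at least that, so this check should never fire)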
*/ if ((pp->pkt_dma & 7) != 0) { - printk("bad alignment for pp->pkt_dma: %08x\n", + printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n", (u32)pp->pkt_dma); - dma_free_coherent(dev, ADMA_PKT_BYTES, - pp->pkt, pp->pkt_dma); - goto err_out_kfree; + return -ENOMEM; } memset(pp->pkt, 0, ADMA_PKT_BYTES); ap->private_data = pp; adma_reinit_engine(ap); return 0; - -err_out_kfree: - kfree(pp); -err_out: - ata_port_stop(ap); - return rc; } static void adma_port_stop(struct ata_port *ap) { - struct device *dev = ap->host->dev; - struct adma_port_priv *pp = ap->private_data; - - adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no)); - if (pp != NULL) { - ap->private_data = NULL; - if (pp->pkt != NULL) - dma_free_coherent(dev, ADMA_PKT_BYTES, - pp->pkt, pp->pkt_dma); - kfree(pp); - } - ata_port_stop(ap); + adma_reset_engine(ap); } -static void adma_host_stop(struct ata_host *host) +static void adma_host_init(struct ata_host *host, unsigned int chip_id) { unsigned int port_no; - for (port_no = 0; port_no < ADMA_PORTS; ++port_no) - adma_reset_engine(ADMA_REGS(host->mmio_base, port_no)); - - ata_pci_host_stop(host); -} - -static void adma_host_init(unsigned int chip_id, - struct ata_probe_ent *probe_ent) -{ - unsigned int port_no; - void __iomem *mmio_base = probe_ent->mmio_base; - /* enable/lock aGO operation */ - writeb(7, mmio_base + ADMA_MODE_LOCK); + writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK); /* reset the ADMA logic */ for (port_no = 0; port_no < ADMA_PORTS; ++port_no) - adma_reset_engine(ADMA_REGS(mmio_base, port_no)); + adma_reset_engine(host->ports[port_no]); } static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) { int rc; - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); + dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } return 0; } static int adma_ata_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - void __iomem *mmio_base; unsigned int board_idx = (unsigned int) ent->driver_data; + const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL }; + struct ata_host *host; + void __iomem *mmio_base; int rc, port_no; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); - if (rc) - return rc; + /* alloc host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS); + if (!host) + return -ENOMEM; - rc = pci_request_regions(pdev, DRV_NAME); + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); if (rc) - goto err_out; + return rc; - if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) { - rc = -ENODEV; - goto err_out_regions; - } + if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) + return -ENODEV; - mmio_base = pci_iomap(pdev, 4, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + mmio_base = 
host->iomap[ADMA_MMIO_BAR]; rc = adma_set_dma_masks(pdev, mmio_base); if (rc) - goto err_out_iounmap; - - probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_iounmap; - } - - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); + return rc; - probe_ent->sht = adma_port_info[board_idx].sht; - probe_ent->port_flags = adma_port_info[board_idx].flags; - probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; - probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; - probe_ent->udma_mask = adma_port_info[board_idx].udma_mask; - probe_ent->port_ops = adma_port_info[board_idx].port_ops; + for (port_no = 0; port_no < ADMA_PORTS; ++port_no) { + struct ata_port *ap = host->ports[port_no]; + void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no); + unsigned int offset = port_base - mmio_base; - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - probe_ent->n_ports = ADMA_PORTS; + adma_ata_setup_port(&ap->ioaddr, port_base); - for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { - adma_ata_setup_port(&probe_ent->port[port_no], - ADMA_ATA_REGS((unsigned long)mmio_base, port_no)); + ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port"); } - pci_set_master(pdev); - /* initialize adapter */ - adma_host_init(board_idx, probe_ent); - - rc = ata_device_add(probe_ent); - kfree(probe_ent); - if (rc != ADMA_PORTS) - goto err_out_iounmap; - return 0; - -err_out_iounmap: - pci_iounmap(pdev, mmio_base); -err_out_regions: - pci_release_regions(pdev); -err_out: - pci_disable_device(pdev); - return rc; -} + adma_host_init(host, board_idx); -static int __init adma_ata_init(void) -{ - return pci_register_driver(&adma_ata_pci_driver); + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED, + &adma_ata_sht); } -static void __exit adma_ata_exit(void) -{ - pci_unregister_driver(&adma_ata_pci_driver); -} +module_pci_driver(adma_ata_pci_driver); MODULE_AUTHOR("Mark Lord"); MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_init(adma_ata_init); -module_exit(adma_ata_exit); diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c new file mode 100644 index 00000000000..0bb2cabd219 --- /dev/null +++ b/drivers/ata/sata_dwc_460ex.c @@ -0,0 +1,1822 @@ +/* + * drivers/ata/sata_dwc_460ex.c + * + * Synopsys DesignWare Cores (DWC) SATA host driver + * + * Author: Mark Miesfeld <mmiesfeld@amcc.com> + * + * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> + * Copyright 2008 DENX Software Engineering + * + * Based on versions provided by AMCC and Synopsys which are: + * Copyright 2006 Applied Micro Circuits Corporation + * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifdef CONFIG_SATA_DWC_DEBUG +#define DEBUG +#endif + +#ifdef CONFIG_SATA_DWC_VDEBUG +#define VERBOSE_DEBUG +#define DEBUG_NCQ +#endif + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/slab.h> +#include "libata.h" + +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> + +/* These two are defined in "libata.h" */ +#undef DRV_NAME +#undef DRV_VERSION + +#define DRV_NAME "sata-dwc" +#define DRV_VERSION "1.3" + +/* SATA DMA driver Globals */ +#define DMA_NUM_CHANS 1 +#define DMA_NUM_CHAN_REGS 8 + +/* SATA DMA Register definitions */ +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ + +struct dmareg { + u32 low; /* Low bits 0-31 */ + u32 high; /* High bits 32-63 */ +}; + +/* DMA Per Channel registers */ +struct dma_chan_regs { + struct dmareg sar; /* Source Address */ + struct dmareg dar; /* Destination address */ + struct dmareg llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ + struct dmareg sstat; /* Source Status not implemented in core */ + struct dmareg dstat; /* Destination Status not implemented in core*/ + struct dmareg sstatar; /* Source Status Address not impl in core */ + struct dmareg dstatar; /* Destination Status Address not implemented */ + struct dmareg cfg; /* Config */ + struct dmareg sgr; /* Source Gather */ + struct dmareg dsr; /* Destination Scatter */ +}; + +/* Generic Interrupt Registers */ +struct dma_interrupt_regs { + struct dmareg tfr; /* Transfer Interrupt */ + struct dmareg block; /* Block Interrupt */ + struct dmareg srctran; /* Source Transfer Interrupt */ + struct dmareg dsttran; /* Dest Transfer Interrupt */ + struct dmareg error; /* Error */ +}; + +struct ahb_dma_regs { + struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; + struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ + struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ + struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ + struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ + struct dmareg statusInt; /* Interrupt combined*/ + struct dmareg rq_srcreg; /* Src Trans Req */ + struct dmareg rq_dstreg; /* Dst Trans Req */ + struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/ + struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/ + struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/ + struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/ + struct dmareg dma_cfg; /* DMA Config */ + struct dmareg dma_chan_en; /* DMA Channel Enable*/ + struct dmareg dma_id; /* DMA ID */ + struct dmareg dma_test; /* DMA Test */ + struct dmareg res1; /* reserved */ + struct dmareg res2; /* reserved */ + /* + * DMA Comp Params + * Param 6 = dma_param[0], Param 5 = dma_param[1], + * Param 4 = dma_param[2] ... 
+ */ + struct dmareg dma_params[6]; +}; + +/* Data structure for linked list item */ +struct lli { + u32 sar; /* Source Address */ + u32 dar; /* Destination address */ + u32 llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ + struct dmareg dstat; /* Destination Status */ +}; + +enum { + SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)), + SATA_DWC_DMAC_LLI_NUM = 256, + SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \ + SATA_DWC_DMAC_LLI_NUM), + SATA_DWC_DMAC_TWIDTH_BYTES = 4, + SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \ + SATA_DWC_DMAC_TWIDTH_BYTES), +}; + +/* DMA Register Operation Bits */ +enum { + DMA_EN = 0x00000001, /* Enable AHB DMA */ + DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */ + DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */ +}; + +#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ +#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ + /* Enable channel */ +#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ + ((0x000000001 << (ch)) << 8)) + /* Disable channel */ +#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) + /* Transfer Type & Flow Controller */ +#define DMA_CTL_TTFC(type) (((type) & 0x7) << 20) +#define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */ +#define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */ + /* Src Burst Transaction Length */ +#define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14) + /* Dst Burst Transaction Length */ +#define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11) + /* Source Transfer Width */ +#define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4) + /* Destination Transfer Width */ +#define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1) + +/* Assign HW handshaking interface (x) to destination / source peripheral */ +#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) +#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) +#define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5) +#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) + +/* + * This define is used to set block chaining disabled in the control low + * register. It is already in little endian format so it can be &'d directly. 
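+ * (Derivation: ~(0x10000000 | 0x08000000) = 0xe7ffffff, which byte-swaps + * to 0xffffffe7 for the little-endian DMAC on this big-endian CPU.)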
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) + */ +enum { + DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7, + DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */ + DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */ + DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */ + DMA_CTL_SINC_DEC = 0x00000200, + DMA_CTL_SINC_NOCHANGE = 0x00000400, + DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */ + DMA_CTL_DINC_DEC = 0x00000080, + DMA_CTL_DINC_NOCHANGE = 0x00000100, + DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */ + +/* Channel Configuration Register high bits */ + DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */ + DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */ + +/* Channel Configuration Register low bits */ + DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */ + DMA_CFG_RELD_SRC = 0x40000000, + DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */ + DMA_CFG_HS_SELDST = 0x00000400, + DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */ + +/* Channel Linked List Pointer Register */ + DMA_LLP_AHBMASTER1 = 0, /* List Master Select */ + DMA_LLP_AHBMASTER2 = 1, + + SATA_DWC_MAX_PORTS = 1, + + SATA_DWC_SCR_OFFSET = 0x24, + SATA_DWC_REG_OFFSET = 0x64, +}; + +/* DWC SATA Registers */ +struct sata_dwc_regs { + u32 fptagr; /* 1st party DMA tag */ + u32 fpbor; /* 1st party DMA buffer offset */ + u32 fptcr; /* 1st party DMA Xfr count */ + u32 dmacr; /* DMA Control */ + u32 dbtsr; /* DMA Burst Transac size */ + u32 intpr; /* Interrupt Pending */ + u32 intmr; /* Interrupt Mask */ + u32 errmr; /* Error Mask */ + u32 llcr; /* Link Layer Control */ + u32 phycr; /* PHY Control */ + u32 physr; /* PHY Status */ + u32 rxbistpd; /* Recvd BIST pattern def register */ + u32 rxbistpd1; /* Recvd BIST data dword1 */ + u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ + u32 txbistpd; /* Trans BIST pattern def register */ + u32 txbistpd1; /* Trans BIST data dword1 */ + u32 txbistpd2; /* Trans BIST data dword2 */ + u32 bistcr; /* BIST Control Register */ + u32 bistfctr; /* BIST FIS Count Register */ + u32 bistsr; /* BIST Status Register */ + u32 bistdecr; /* BIST Dword Error count register */ + u32 res[15]; /* Reserved locations */ + u32 testr; /* Test Register */ + u32 versionr; /* Version Register */ + u32 idr; /* ID Register */ + u32 unimpl[192]; /* Unimplemented */ + u32 dmadr[256]; /* FIFO Locations in DMA Mode */ +}; + +enum { + SCR_SCONTROL_DET_ENABLE = 0x00000001, + SCR_SSTATUS_DET_PRESENT = 0x00000001, + SCR_SERROR_DIAG_X = 0x04000000, +/* DWC SATA Register Operations */ + SATA_DWC_TXFIFO_DEPTH = 0x01FF, + SATA_DWC_RXFIFO_DEPTH = 0x01FF, + SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004, + SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN), + SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN), + SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN, + SATA_DWC_INTPR_DMAT = 0x00000001, + SATA_DWC_INTPR_NEWFP = 0x00000002, + SATA_DWC_INTPR_PMABRT = 0x00000004, + SATA_DWC_INTPR_ERR = 0x00000008, + SATA_DWC_INTPR_NEWBIST = 0x00000010, + SATA_DWC_INTPR_IPF = 0x10000000, + SATA_DWC_INTMR_DMATM = 0x00000001, + SATA_DWC_INTMR_NEWFPM = 0x00000002, + SATA_DWC_INTMR_PMABRTM = 0x00000004, + SATA_DWC_INTMR_ERRM = 0x00000008, + SATA_DWC_INTMR_NEWBISTM = 0x00000010, + SATA_DWC_LLCR_SCRAMEN = 0x00000001, + SATA_DWC_LLCR_DESCRAMEN = 0x00000002, + SATA_DWC_LLCR_RPDEN = 0x00000004, +/* These are all error bits; zeros are reserved fields. 
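+ * (0x0FFF0F03 keeps SError bits 27:16, 11:8 and 1:0 unmasked.)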
*/ + SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03 +}; + +#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F) +#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH) +#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\ + << 16) +struct sata_dwc_device { + struct device *dev; /* generic device struct */ + struct ata_probe_ent *pe; /* ptr to probe-ent */ + struct ata_host *host; + u8 *reg_base; + struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ + int irq_dma; +}; + +#define SATA_DWC_QCMD_MAX 32 + +struct sata_dwc_device_port { + struct sata_dwc_device *hsdev; + int cmd_issued[SATA_DWC_QCMD_MAX]; + struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */ + dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; + u32 dma_chan[SATA_DWC_QCMD_MAX]; + int dma_pending[SATA_DWC_QCMD_MAX]; +}; + +/* + * Commonly used DWC SATA driver Macros + */ +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\ + (host)->private_data) +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\ + (ap)->host->private_data) +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\ + (ap)->private_data) +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\ + (qc)->ap->host->private_data) +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\ + (hsdevp)->hsdev) + +enum { + SATA_DWC_CMD_ISSUED_NOT = 0, + SATA_DWC_CMD_ISSUED_PEND = 1, + SATA_DWC_CMD_ISSUED_EXEC = 2, + SATA_DWC_CMD_ISSUED_NODATA = 3, + + SATA_DWC_DMA_PENDING_NONE = 0, + SATA_DWC_DMA_PENDING_TX = 1, + SATA_DWC_DMA_PENDING_RX = 2, +}; + +struct sata_dwc_host_priv { + void __iomem *scr_addr_sstatus; + u32 sata_dwc_sactive_issued ; + u32 sata_dwc_sactive_queued ; + u32 dma_interrupt_count; + struct ahb_dma_regs *sata_dma_regs; + struct device *dwc_dev; + int dma_channel; +}; +struct sata_dwc_host_priv host_pvt; +/* + * Prototypes + */ +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status); +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); +static void sata_dwc_port_stop(struct ata_port *ap); +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); +static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq); +static void dma_dwc_exit(struct sata_dwc_device *hsdev); +static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr, int dir); +static void dma_dwc_xfer_start(int dma_ch); + +static const char *get_prot_descript(u8 protocol) +{ + switch ((enum ata_tf_protocols)protocol) { + case ATA_PROT_NODATA: + return "ATA no data"; + case ATA_PROT_PIO: + return "ATA PIO"; + case ATA_PROT_DMA: + return "ATA DMA"; + case ATA_PROT_NCQ: + return "ATA NCQ"; + case ATAPI_PROT_NODATA: + return "ATAPI no data"; + case ATAPI_PROT_PIO: + return "ATAPI PIO"; + case ATAPI_PROT_DMA: + return "ATAPI DMA"; + default: + return "unknown"; + } +} + +static const char *get_dma_dir_descript(int dma_dir) +{ + switch ((enum dma_data_direction)dma_dir) { + case DMA_BIDIRECTIONAL: + return "bidirectional"; + case DMA_TO_DEVICE: + return "to device"; + case DMA_FROM_DEVICE: + return "from device"; + default: + return "none"; + } +} + +static void sata_dwc_tf_dump(struct ata_taskfile *tf) +{ + 
dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: " + "0x%lx device: %x\n", tf->command, + get_prot_descript(tf->protocol), tf->flags, tf->device); + dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " + "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah); + dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x " + "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", + tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, + tf->hob_lbah); +} + +/* + * Function: get_burst_length_encode + * arguments: datalength: length in bytes of data + * returns value to be programmed in register corresponding to data length + * This value is effectively the log(base 2) of the length + */ +static int get_burst_length_encode(int datalength) +{ + int items = datalength >> 2; /* div by 4 to get lword count */ + + if (items >= 64) + return 5; + + if (items >= 32) + return 4; + + if (items >= 16) + return 3; + + if (items >= 8) + return 2; + + if (items >= 4) + return 1; + + return 0; +} + +static void clear_chan_interrupts(int c) +{ + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low), + DMA_CHANNEL(c)); + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low), + DMA_CHANNEL(c)); + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low), + DMA_CHANNEL(c)); + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low), + DMA_CHANNEL(c)); + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low), + DMA_CHANNEL(c)); +} + +/* + * Function: dma_request_channel + * arguments: None + * returns channel number if available else -1 + * This function assigns the next available DMA channel from the list to the + * requester + */ +static int dma_request_channel(void) +{ + /* Check if the channel is not currently in use */ + if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) & + DMA_CHANNEL(host_pvt.dma_channel))) + return host_pvt.dma_channel; + dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n", + __func__, host_pvt.dma_channel); + return -1; +} + +/* + * Function: dma_dwc_interrupt + * arguments: irq, hsdev_instance + * returns irqreturn_t - status of IRQ handling + * Interrupt Handler for DW AHB SATA DMA + */ +static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) +{ + int chan; + u32 tfr_reg, err_reg; + unsigned long flags; + struct sata_dwc_device *hsdev = hsdev_instance; + struct ata_host *host = (struct ata_host *)hsdev->host; + struct ata_port *ap; + struct sata_dwc_device_port *hsdevp; + u8 tag = 0; + unsigned int port = 0; + + spin_lock_irqsave(&host->lock, flags); + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + tag = ap->link.active_tag; + + tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\ .low)); + err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\ .low)); + + dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", + tfr_reg, err_reg, hsdevp->dma_pending[tag], port); + + chan = host_pvt.dma_channel; + if (chan >= 0) { + /* Check for end-of-transfer interrupt. */ + if (tfr_reg & DMA_CHANNEL(chan)) { + /* + * Each DMA command produces 2 interrupts. Only + * complete the command after both interrupts have been + * seen. 
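+ * The (dma_interrupt_count % 2) == 0 test below is what enforces this. 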
(See sata_dwc_isr()) + */ + host_pvt.dma_interrupt_count++; + sata_dwc_clear_dmacr(hsdevp, tag); + + if (hsdevp->dma_pending[tag] == + SATA_DWC_DMA_PENDING_NONE) { + dev_err(ap->dev, "DMA not pending eot=0x%08x " + "err=0x%08x tag=0x%02x pending=%d\n", + tfr_reg, err_reg, tag, + hsdevp->dma_pending[tag]); + } + + if ((host_pvt.dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + + /* Clear the interrupt */ + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ + .tfr.low), + DMA_CHANNEL(chan)); + } + + /* Check for error interrupt. */ + if (err_reg & DMA_CHANNEL(chan)) { + /* TODO Need error handler ! */ + dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", + err_reg); + + /* Clear the interrupt. */ + out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ + .error.low), + DMA_CHANNEL(chan)); + } + } + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_HANDLED; +} + +/* + * Function: dma_request_interrupts + * arguments: hsdev + * returns status + * This function registers ISR for a particular DMA channel interrupt + */ +static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) +{ + int retval = 0; + int chan = host_pvt.dma_channel; + + if (chan >= 0) { + /* Unmask error interrupt */ + out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, + DMA_ENABLE_CHAN(chan)); + + /* Unmask end-of-transfer interrupt */ + out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low, + DMA_ENABLE_CHAN(chan)); + } + + retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev); + if (retval) { + dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n", + __func__, irq); + return -ENODEV; + } + + /* Mark this interrupt as requested */ + hsdev->irq_dma = irq; + return 0; +} + +/* + * Function: map_sg_to_lli + * The Synopsis driver has a comment proposing that better performance + * is possible by only enabling interrupts on the last item in the linked list. + * However, it seems that could be a problem if an error happened on one of the + * first items. The transfer would halt, but no error interrupt would occur. + * Currently this function sets interrupts enabled for each linked list item: + * DMA_CTL_INT_EN. + */ +static int map_sg_to_lli(struct scatterlist *sg, int num_elems, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *dmadr_addr, int dir) +{ + int i, idx = 0; + int fis_len = 0; + dma_addr_t next_llp; + int bl; + int sms_val, dms_val; + + sms_val = 0; + dms_val = 1 + host_pvt.dma_channel; + dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" + " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, + (u32)dmadr_addr); + + bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); + + for (i = 0; i < num_elems; i++, sg++) { + u32 addr, offset; + u32 sg_len, len; + + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len" + "=%d\n", __func__, i, addr, sg_len); + + while (sg_len) { + if (idx >= SATA_DWC_DMAC_LLI_NUM) { + /* The LLI table is not large enough. */ + dev_err(host_pvt.dwc_dev, "LLI table overrun " + "(idx=%d)\n", idx); + break; + } + len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? + SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; + + offset = addr & 0xffff; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + /* + * Make sure a LLI block is not created that will span + * 8K max FIS boundary. If the block spans such a FIS + * boundary, there is a chance that a DMA burst will + * cross that boundary -- this results in an error in + * the host controller. 
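+ * Worked example (hypothetical sizes): with fis_len = 6000 and a 4096 + * byte chunk, len is trimmed to 8192 - 6000 = 2192 so the LLI ends on + * the FIS boundary; the remaining 1904 bytes start a new 8K window on + * the next loop iteration. 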
+ */ + if (fis_len + len > 8192) { + dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len=" + "%d(0x%x) len=%d(0x%x)\n", fis_len, + fis_len, len, len); + len = 8192 - fis_len; + fis_len = 0; + } else { + fis_len += len; + } + if (fis_len == 8192) + fis_len = 0; + + /* + * Set DMA addresses and lower half of control register + * based on direction. + */ + if (dir == DMA_FROM_DEVICE) { + lli[idx].dar = cpu_to_le32(addr); + lli[idx].sar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(sms_val) | + DMA_CTL_DMS(dms_val) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else { /* DMA_TO_DEVICE */ + lli[idx].sar = cpu_to_le32(addr); + lli[idx].dar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(dms_val) | + DMA_CTL_DMS(sms_val) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } + + dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: " + "0x%08x val: 0x%08x\n", __func__, + len, DMA_CTL_BLK_TS(len / 4)); + + /* Program the LLI CTL high register */ + lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\ + (len / 4)); + + /* Program the next pointer. The next pointer must be + * the physical address, not the virtual address. + */ + next_llp = (dma_lli + ((idx + 1) * sizeof(struct \ + lli))); + + /* The last 2 bits encode the list master select. */ + next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); + + lli[idx].llp = cpu_to_le32(next_llp); + idx++; + sg_len -= len; + addr += len; + } + } + + /* + * The last next ptr has to be zero and the last control low register + * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source + * and destination enable) set back to 0 (disabled.) This is what tells + * the core that this is the last item in the linked list. + */ + if (idx) { + lli[idx-1].llp = 0x00000000; + lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; + + /* Flush cache to memory */ + dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), + DMA_BIDIRECTIONAL); + } + + return idx; +} + +/* + * Function: dma_dwc_xfer_start + * arguments: Channel number + * Return : None + * Enables the DMA channel + */ +static void dma_dwc_xfer_start(int dma_ch) +{ + /* Enable the DMA channel */ + out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low), + in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) | + DMA_ENABLE_CHAN(dma_ch)); +} + +static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr, int dir) +{ + int dma_ch; + int num_lli; + /* Acquire DMA channel */ + dma_ch = dma_request_channel(); + if (dma_ch == -1) { + dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n", + __func__); + return -EAGAIN; + } + + /* Convert SG list to linked list of items (LLIs) for AHB DMA */ + num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir); + + dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:" + " 0x%08x addr: %p lli count: %d\n", __func__, sg, num_elems, + lli, (u32)dma_lli, addr, num_lli); + + clear_chan_interrupts(dma_ch); + + /* Program the CFG register. 
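Both hardware handshake + * interface numbers and the channel priority are derived from the + * channel index below. 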
*/ + out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), + DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) | + DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); + out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), + DMA_CFG_HW_CH_PRIOR(dma_ch)); + + /* Program the address of the linked list */ + out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), + DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); + + /* Program the CTL register with src enable / dst enable */ + out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), + DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); + return dma_ch; +} + +/* + * Function: dma_dwc_exit + * arguments: None + * returns status + * This function exits the SATA DMA driver + */ +static void dma_dwc_exit(struct sata_dwc_device *hsdev) +{ + dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); + if (host_pvt.sata_dma_regs) { + iounmap(host_pvt.sata_dma_regs); + host_pvt.sata_dma_regs = NULL; + } + + if (hsdev->irq_dma) { + free_irq(hsdev->irq_dma, hsdev); + hsdev->irq_dma = 0; + } +} + +/* + * Function: dma_dwc_init + * arguments: hsdev + * returns status + * This function initializes the SATA DMA driver + */ +static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) +{ + int err; + + err = dma_request_interrupts(hsdev, irq); + if (err) { + dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" + " %d\n", __func__, err); + goto error_out; + } + + /* Enabe DMA */ + out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN); + + dev_notice(host_pvt.dwc_dev, "DMA initialized\n"); + dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\ + sata_dma_regs); + + return 0; + +error_out: + dma_dwc_exit(hsdev); + + return err; +} + +static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + if (scr > SCR_NOTIFICATION) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + + *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", + __func__, link->ap->print_id, scr, *val); + + return 0; +} + +static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", + __func__, link->ap->print_id, scr, val); + if (scr > SCR_NOTIFICATION) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); + + return 0; +} + +static u32 core_scr_read(unsigned int scr) +{ + return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\ + (scr * 4)); +} + +static void core_scr_write(unsigned int scr, u32 val) +{ + out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4), + val); +} + +static void clear_serror(void) +{ + u32 val; + val = core_scr_read(SCR_ERROR); + core_scr_write(SCR_ERROR, val); + +} + +static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) +{ + out_le32(&hsdev->sata_dwc_regs->intpr, + in_le32(&hsdev->sata_dwc_regs->intpr)); +} + +static u32 qcmd_tag_to_mask(u8 tag) +{ + return 0x00000001 << (tag & 0x1f); +} + +/* See ahci.c */ +static void sata_dwc_error_intr(struct ata_port *ap, + struct sata_dwc_device *hsdev, uint intpr) +{ + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct ata_eh_info *ehi = &ap->link.eh_info; + unsigned int err_mask = 0, action = 0; + struct ata_queued_cmd *qc; + u32 serror; + u8 status, tag; + u32 err_reg; + + ata_ehi_clear_desc(ehi); + + serror = 
core_scr_read(SCR_ERROR); + status = ap->ops->sff_check_status(ap); + + err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\ + low)); + tag = ap->link.active_tag; + + dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x " + "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n", + __func__, serror, intpr, status, host_pvt.dma_interrupt_count, + hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg); + + /* Clear error register and interrupt bit */ + clear_serror(); + clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); + + /* This is the only error happening now. TODO check for exact error */ + + err_mask |= AC_ERR_HOST_BUS; + action |= ATA_EH_RESET; + + /* Pass this on to EH */ + ehi->serror |= serror; + ehi->action |= action; + + qc = ata_qc_from_tag(ap, tag); + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + ata_port_abort(ap); +} + +/* + * Function : sata_dwc_isr + * arguments : irq, void *dev_instance, struct pt_regs *regs + * Return value : irqreturn_t - status of IRQ + * This Interrupt handler called via port ops registered function. + * .irq_handler = sata_dwc_isr + */ +static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) +{ + struct ata_host *host = (struct ata_host *)dev_instance; + struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); + struct ata_port *ap; + struct ata_queued_cmd *qc; + unsigned long flags; + u8 status, tag; + int handled, num_processed, port = 0; + uint intpr, sactive, sactive2, tag_mask; + struct sata_dwc_device_port *hsdevp; + host_pvt.sata_dwc_sactive_issued = 0; + + spin_lock_irqsave(&host->lock, flags); + + /* Read the interrupt register */ + intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + + dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, + ap->link.active_tag); + + /* Check for error interrupt */ + if (intpr & SATA_DWC_INTPR_ERR) { + sata_dwc_error_intr(ap, hsdev, intpr); + handled = 1; + goto DONE; + } + + /* Check for DMA SETUP FIS (FP DMA) interrupt */ + if (intpr & SATA_DWC_INTPR_NEWFP) { + clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); + + tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); + dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) + dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); + + host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); + + qc = ata_qc_from_tag(ap, tag); + /* + * Start FP DMA for NCQ command. At this point the tag is the + * active tag. It is the tag that matches the command about to + * be completed. + */ + qc->ap->link.active_tag = tag; + sata_dwc_bmdma_start_by_tag(qc, tag); + + handled = 1; + goto DONE; + } + sactive = core_scr_read(SCR_ACTIVE); + tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; + + /* If no sactive issued and tag_mask is zero then this is not NCQ */ + if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) { + if (ap->link.active_tag == ATA_TAG_POISON) + tag = 0; + else + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + + /* DEV interrupt w/ no active qc? 
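The status read below acks the + * (presumably spurious) device interrupt before bailing out. 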
*/ + if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { + dev_err(ap->dev, "%s interrupt with no active qc " + "qc=%p\n", __func__, qc); + ap->ops->sff_check_status(ap); + handled = 1; + goto DONE; + } + status = ap->ops->sff_check_status(ap); + + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + if (status & ATA_ERR) { + dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto DONE; + } + + dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", + __func__, get_prot_descript(qc->tf.protocol)); +DRVSTILLBUSY: + if (ata_is_dma(qc->tf.protocol)) { + /* + * Each DMA transaction produces 2 interrupts. The DMAC + * transfer complete interrupt and the SATA controller + * operation done interrupt. The command should be + * completed only after both interrupts are seen. + */ + host_pvt.dma_interrupt_count++; + if (hsdevp->dma_pending[tag] == \ + SATA_DWC_DMA_PENDING_NONE) { + dev_err(ap->dev, "%s: DMA not pending " + "intpr=0x%08x status=0x%08x pending" + "=%d\n", __func__, intpr, status, + hsdevp->dma_pending[tag]); + } + + if ((host_pvt.dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else if (ata_is_pio(qc->tf.protocol)) { + ata_sff_hsm_move(ap, qc, status, 0); + handled = 1; + goto DONE; + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto DRVSTILLBUSY; + } + + handled = 1; + goto DONE; + } + + /* + * This is a NCQ command. At this point we need to figure out for which + * tags we have gotten a completion interrupt. One interrupt may serve + * as completion for more than one operation when commands are queued + * (NCQ). We need to process each completed command. + */ + + /* process completed commands */ + sactive = core_scr_read(SCR_ACTIVE); + tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; + + if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ + tag_mask > 1) { + dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x" + "tag_mask=0x%08x\n", __func__, sactive, + host_pvt.sata_dwc_sactive_issued, tag_mask); + } + + if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ + (host_pvt.sata_dwc_sactive_issued)) { + dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " + "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask" + "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued, + tag_mask); + } + + /* read just to clear ... 
not bad if currently still busy */ + status = ap->ops->sff_check_status(ap); + dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); + + tag = 0; + num_processed = 0; + while (tag_mask) { + num_processed++; + while (!(tag_mask & 0x00000001)) { + tag++; + tag_mask <<= 1; + } + + tag_mask &= (~0x00000001); + qc = ata_qc_from_tag(ap, tag); + + /* To be picked up by completion functions */ + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + /* Let libata/scsi layers handle error */ + if (status & ATA_ERR) { + dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, + status); + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto DONE; + } + + /* Process completed command */ + dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, + get_prot_descript(qc->tf.protocol)); + if (ata_is_dma(qc->tf.protocol)) { + host_pvt.dma_interrupt_count++; + if (hsdevp->dma_pending[tag] == \ + SATA_DWC_DMA_PENDING_NONE) + dev_warn(ap->dev, "%s: DMA not pending?\n", + __func__); + if ((host_pvt.dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto STILLBUSY; + } + continue; + +STILLBUSY: + ap->stats.idle_irq++; + dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", + ap->print_id); + } /* while tag_mask */ + + /* + * Check to see if any commands completed while we were processing our + * initial set of completed commands (read status clears interrupts, + * so we might miss a completed command interrupt if one came in while + * we were processing --we read status as part of processing a completed + * command). + */ + sactive2 = core_scr_read(SCR_ACTIVE); + if (sactive2 != sactive) { + dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2" + "=0x%x\n", sactive, sactive2); + } + handled = 1; + +DONE: + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_RETVAL(handled); +} + +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); + + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_RX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else { + /* + * This should not happen, it indicates the driver is out of + * sync. If it does happen, clear dmacr anyway. 
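+ * Writing SATA_DWC_DMACR_TXRXCH_CLEAR disables both channels, putting + * the register back into a known state. 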
+ */ + dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and " + "TX DMA not pending tag=0x%02x pending=%d" + " dmacr: 0x%08x\n", __func__, tag, + hsdevp->dma_pending[tag], + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TXRXCH_CLEAR); + } +} + +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) +{ + struct ata_queued_cmd *qc; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u8 tag = 0; + + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + if (!qc) { + dev_err(ap->dev, "failed to get qc"); + return; + } + +#ifdef DEBUG_NCQ + if (tag > 0) { + dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " + "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, + get_dma_dir_descript(qc->dma_dir), + get_prot_descript(qc->tf.protocol), + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } +#endif + + if (ata_is_dma(qc->tf.protocol)) { + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { + dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " + "pending dmacr: 0x%08x\n", __func__, + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } + + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + sata_dwc_qc_complete(ap, qc, check_status); + ap->link.active_tag = ATA_TAG_POISON; + } else { + sata_dwc_qc_complete(ap, qc, check_status); + } +} + +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status) +{ + u8 status = 0; + u32 mask = 0x0; + u8 tag = qc->tag; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + host_pvt.sata_dwc_sactive_queued = 0; + dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); + + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) + dev_err(ap->dev, "TX DMA PENDING\n"); + else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) + dev_err(ap->dev, "RX DMA PENDING\n"); + dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:" + " protocol=%d\n", qc->tf.command, status, ap->print_id, + qc->tf.protocol); + + /* clear active bit */ + mask = (~(qcmd_tag_to_mask(tag))); + host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \ + & mask; + host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \ + & mask; + ata_qc_complete(qc); + return 0; +} + +static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) +{ + /* Enable selective interrupts by setting the interrupt mask register */ + out_le32(&hsdev->sata_dwc_regs->intmr, + SATA_DWC_INTMR_ERRM | + SATA_DWC_INTMR_NEWFPM | + SATA_DWC_INTMR_PMABRTM | + SATA_DWC_INTMR_DMATM); + /* + * Unmask the error bits that should trigger an error interrupt by + * setting the error mask register. 
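+ * (Despite the name, a set bit in errmr enables, rather than masks, the + * corresponding SError bit as an interrupt source.) 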
+ */ + out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); + + dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", + __func__, in_le32(&hsdev->sata_dwc_regs->intmr), + in_le32(&hsdev->sata_dwc_regs->errmr)); +} + +static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = (void *)base + 0x00; + port->data_addr = (void *)base + 0x00; + + port->error_addr = (void *)base + 0x04; + port->feature_addr = (void *)base + 0x04; + + port->nsect_addr = (void *)base + 0x08; + + port->lbal_addr = (void *)base + 0x0c; + port->lbam_addr = (void *)base + 0x10; + port->lbah_addr = (void *)base + 0x14; + + port->device_addr = (void *)base + 0x18; + port->command_addr = (void *)base + 0x1c; + port->status_addr = (void *)base + 0x1c; + + port->altstatus_addr = (void *)base + 0x20; + port->ctl_addr = (void *)base + 0x20; +} + +/* + * Function : sata_dwc_port_start + * arguments : struct ata_ioports *port + * Return value : returns 0 if success, error code otherwise + * This function allocates the scatter gather LLI table for AHB DMA + */ +static int sata_dwc_port_start(struct ata_port *ap) +{ + int err = 0; + struct sata_dwc_device *hsdev; + struct sata_dwc_device_port *hsdevp = NULL; + struct device *pdev; + int i; + + hsdev = HSDEV_FROM_AP(ap); + + dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); + + hsdev->host = ap->host; + pdev = ap->host->dev; + if (!pdev) { + dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); + err = -ENODEV; + goto CLEANUP; + } + + /* Allocate Port Struct */ + hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); + if (!hsdevp) { + dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__); + err = -ENOMEM; + goto CLEANUP; + } + hsdevp->hsdev = hsdev; + + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) + hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; + + ap->bmdma_prd = 0; /* set these so libata doesn't use them */ + ap->bmdma_prd_dma = 0; + + /* + * DMA - Assign scatter gather LLI table. We can't use the libata + * version since its PRD is IDE PCI specific. + */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + hsdevp->llit[i] = dma_alloc_coherent(pdev, + SATA_DWC_DMAC_LLI_TBL_SZ, + &(hsdevp->llit_dma[i]), + GFP_ATOMIC); + if (!hsdevp->llit[i]) { + dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", + __func__); + err = -ENOMEM; + goto CLEANUP_ALLOC; + } + } + + if (ap->port_no == 0) { + dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", + __func__); + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", + __func__); + out_le32(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + } + + /* Clear any error bits before libata starts issuing commands */ + clear_serror(); + ap->private_data = hsdevp; + dev_dbg(ap->dev, "%s: done\n", __func__); + return 0; + +CLEANUP_ALLOC: + /* free any LLI tables already allocated before bailing out */ + while (--i >= 0) + dma_free_coherent(pdev, SATA_DWC_DMAC_LLI_TBL_SZ, + hsdevp->llit[i], hsdevp->llit_dma[i]); + kfree(hsdevp); +CLEANUP: + dev_dbg(ap->dev, "%s: fail. 
ap->id = %d\n", __func__, ap->print_id); + return err; +} + +static void sata_dwc_port_stop(struct ata_port *ap) +{ + int i; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); + + if (hsdevp && hsdev) { + /* deallocate LLI table */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + dma_free_coherent(ap->host->dev, + SATA_DWC_DMAC_LLI_TBL_SZ, + hsdevp->llit[i], hsdevp->llit_dma[i]); + } + + kfree(hsdevp); + } + ap->private_data = NULL; +} + +/* + * Function : sata_dwc_exec_command_by_tag + * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued + * Return value : None + * This function keeps track of individual command tag ids and calls + * ata_exec_command in libata + */ +static void sata_dwc_exec_command_by_tag(struct ata_port *ap, + struct ata_taskfile *tf, + u8 tag, u32 cmd_issued) +{ + unsigned long flags; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, + ata_get_cmd_descript(tf->command), tag); + + spin_lock_irqsave(&ap->host->lock, flags); + hsdevp->cmd_issued[tag] = cmd_issued; + spin_unlock_irqrestore(&ap->host->lock, flags); + /* + * Clear SError before executing a new command. + * sata_dwc_scr_write and read can not be used here. Clearing the PM + * managed SError register for the disk needs to be done before the + * task file is loaded. + */ + clear_serror(); + ata_sff_exec_command(ap, tf); +} + +static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, + SATA_DWC_CMD_ISSUED_PEND); +} + +static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + if (ata_is_ncq(qc->tf.protocol)) { + dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + sata_dwc_bmdma_setup_by_tag(qc, tag); +} + +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + int start_dma; + u32 reg, dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); + struct ata_port *ap = qc->ap; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + int dir = qc->dma_dir; + dma_chan = hsdevp->dma_chan[tag]; + + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { + start_dma = 1; + if (dir == DMA_TO_DEVICE) + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; + else + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; + } else { + dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " + "(tag=%d) DMA NOT started\n", __func__, + hsdevp->cmd_issued[tag], tag); + start_dma = 0; + } + + dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " + "start_dma? 
%x\n", __func__, qc, tag, qc->tf.command, + get_dma_dir_descript(qc->dma_dir), start_dma); + sata_dwc_tf_dump(&(qc->tf)); + + if (start_dma) { + reg = core_scr_read(SCR_ERROR); + if (reg & SATA_DWC_SERROR_ERR_BITS) { + dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", + __func__, reg); + } + + if (dir == DMA_TO_DEVICE) + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXCHEN); + else + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_RXCHEN); + + /* Enable AHB DMA transfer on the specified channel */ + dma_dwc_xfer_start(dma_chan); + } +} + +static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + if (ata_is_ncq(qc->tf.protocol)) { + dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + dev_dbg(qc->ap->dev, "%s\n", __func__); + sata_dwc_bmdma_start_by_tag(qc, tag); +} + +/* + * Function : sata_dwc_qc_prep_by_tag + * arguments : ata_queued_cmd *qc, u8 tag + * Return value : None + * qc_prep for a particular queued command based on tag + */ +static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + int dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", + __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), + qc->n_elem); + + dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], + hsdevp->llit_dma[tag], + (void *__iomem)(&hsdev->sata_dwc_regs->\ + dmadr), qc->dma_dir); + if (dma_chan < 0) { + dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", + __func__, dma_chan); + return; + } + hsdevp->dma_chan[tag] = dma_chan; +} + +static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) +{ + u32 sactive; + u8 tag = qc->tag; + struct ata_port *ap = qc->ap; + +#ifdef DEBUG_NCQ + if (qc->tag > 0 || ap->link.sactive > 1) + dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " + "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", + __func__, ap->print_id, qc->tf.command, + ata_get_cmd_descript(qc->tf.command), + qc->tag, get_prot_descript(qc->tf.protocol), + ap->link.active_tag, ap->link.sactive); +#endif + + if (!ata_is_ncq(qc->tf.protocol)) + tag = 0; + sata_dwc_qc_prep_by_tag(qc, tag); + + if (ata_is_ncq(qc->tf.protocol)) { + sactive = core_scr_read(SCR_ACTIVE); + sactive |= (0x00000001 << tag); + core_scr_write(SCR_ACTIVE, sactive); + + dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " + "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, + sactive); + + ap->ops->sff_tf_load(ap, &qc->tf); + sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, + SATA_DWC_CMD_ISSUED_PEND); + } else { + ata_sff_qc_issue(qc); + } + return 0; +} + +/* + * Function : sata_dwc_qc_prep + * arguments : ata_queued_cmd *qc + * Return value : None + * qc_prep for a particular queued command + */ + +static void sata_dwc_qc_prep(struct ata_queued_cmd *qc) +{ + if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) + return; + +#ifdef DEBUG_NCQ + if (qc->tag > 0) + dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", + __func__, qc->tag, qc->ap->link.active_tag); + + return ; +#endif +} + +static void sata_dwc_error_handler(struct ata_port *ap) +{ + ata_sff_error_handler(ap); +} + +int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct sata_dwc_device *hsdev = 
HSDEV_FROM_AP(link->ap); + int ret; + + ret = sata_sff_hardreset(link, class, deadline); + + sata_dwc_enable_interrupts(hsdev); + + /* Reconfigure the DMA control register */ + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + /* Reconfigure the DMA Burst Transaction Size register */ + out_le32(&hsdev->sata_dwc_regs->dbtsr, + SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)); + + return ret; +} + +/* + * scsi mid-layer and libata interface structures + */ +static struct scsi_host_template sata_dwc_sht = { + ATA_NCQ_SHT(DRV_NAME), + /* + * test-only: Currently this driver doesn't handle NCQ + * correctly. We enable NCQ but set the queue depth to a + * max of 1. This will get fixed in a future release. + */ + .sg_tablesize = LIBATA_MAX_PRD, + .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ + .dma_boundary = ATA_DMA_BOUNDARY, +}; + +static struct ata_port_operations sata_dwc_ops = { + .inherits = &ata_sff_port_ops, + + .error_handler = sata_dwc_error_handler, + .hardreset = sata_dwc_hardreset, + + .qc_prep = sata_dwc_qc_prep, + .qc_issue = sata_dwc_qc_issue, + + .scr_read = sata_dwc_scr_read, + .scr_write = sata_dwc_scr_write, + + .port_start = sata_dwc_port_start, + .port_stop = sata_dwc_port_stop, + + .bmdma_setup = sata_dwc_bmdma_setup, + .bmdma_start = sata_dwc_bmdma_start, +}; + +static const struct ata_port_info sata_dwc_port_info[] = { + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &sata_dwc_ops, + }, +}; + +static int sata_dwc_probe(struct platform_device *ofdev) +{ + struct sata_dwc_device *hsdev; + u32 idr, versionr; + char *ver = (char *)&versionr; + u8 *base = NULL; + int err = 0; + int irq, rc; + struct ata_host *host; + struct ata_port_info pi = sata_dwc_port_info[0]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct device_node *np = ofdev->dev.of_node; + u32 dma_chan; + + /* Allocate DWC SATA device */ + hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); + if (hsdev == NULL) { + dev_err(&ofdev->dev, "kzalloc failed for hsdev\n"); + err = -ENOMEM; + goto error; + } + + if (of_property_read_u32(np, "dma-channel", &dma_chan)) { + dev_warn(&ofdev->dev, "no dma-channel property set." 
+ " Use channel 0\n"); + dma_chan = 0; + } + host_pvt.dma_channel = dma_chan; + + /* Ioremap SATA registers */ + base = of_iomap(ofdev->dev.of_node, 0); + if (!base) { + dev_err(&ofdev->dev, "ioremap failed for SATA register" + " address\n"); + err = -ENODEV; + goto error_kmalloc; + } + hsdev->reg_base = base; + dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); + + /* Synopsys DWC SATA specific Registers */ + hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); + + /* Allocate and fill host */ + host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); + if (!host) { + dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n"); + err = -ENOMEM; + goto error_iomap; + } + + host->private_data = hsdev; + + /* Setup port */ + host->ports[0]->ioaddr.cmd_addr = base; + host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; + host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET; + sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base); + + /* Read the ID and Version Registers */ + idr = in_le32(&hsdev->sata_dwc_regs->idr); + versionr = in_le32(&hsdev->sata_dwc_regs->versionr); + dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", + idr, ver[0], ver[1], ver[2]); + + /* Get SATA DMA interrupt number */ + irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "no SATA DMA irq\n"); + err = -ENODEV; + goto error_out; + } + + /* Get physical SATA DMA register base address */ + host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1); + if (!(host_pvt.sata_dma_regs)) { + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" + " address\n"); + err = -ENODEV; + goto error_out; + } + + /* Save dev for later use in dev_xxx() routines */ + host_pvt.dwc_dev = &ofdev->dev; + + /* Initialize AHB DMAC */ + dma_dwc_init(hsdev, irq); + + /* Enable SATA Interrupts */ + sata_dwc_enable_interrupts(hsdev); + + /* Get SATA interrupt number */ + irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "no SATA DMA irq\n"); + err = -ENODEV; + goto error_out; + } + + /* + * Now, register with libATA core, this will also initiate the + * device discovery process, invoking our port_start() handler & + * error_handler() to execute a dummy Softreset EH session + */ + rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); + + if (rc != 0) + dev_err(&ofdev->dev, "failed to activate host"); + + dev_set_drvdata(&ofdev->dev, host); + return 0; + +error_out: + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + +error_iomap: + iounmap(base); +error_kmalloc: + kfree(hsdev); +error: + return err; +} + +static int sata_dwc_remove(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct ata_host *host = dev_get_drvdata(dev); + struct sata_dwc_device *hsdev = host->private_data; + + ata_host_detach(host); + dev_set_drvdata(dev, NULL); + + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + + iounmap(hsdev->reg_base); + kfree(hsdev); + kfree(host); + dev_dbg(&ofdev->dev, "done\n"); + return 0; +} + +static const struct of_device_id sata_dwc_match[] = { + { .compatible = "amcc,sata-460ex", }, + {} +}; +MODULE_DEVICE_TABLE(of, sata_dwc_match); + +static struct platform_driver sata_dwc_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sata_dwc_match, + }, + .probe = sata_dwc_probe, + .remove = sata_dwc_remove, +}; + +module_platform_driver(sata_dwc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mark Miesfeld 
<mmiesfeld@amcc.com>"); +MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/sata_dwc_pmp.c b/drivers/ata/sata_dwc_pmp.c new file mode 100644 index 00000000000..3ff190af8d2 --- /dev/null +++ b/drivers/ata/sata_dwc_pmp.c @@ -0,0 +1,3041 @@ +/* + * drivers/ata/sata_dwc.c + * + * Synopsys DesignWare Cores (DWC) SATA host driver + * + * Author: Mark Miesfeld <mmiesfeld@amcc.com> + * + * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> + * Copyright 2008 DENX Software Engineering + * + * Based on versions provided by AMCC and Synopsys which are: + * Copyright 2006 Applied Micro Circuits Corporation + * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/libata.h> +#include <linux/rtc.h> + +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> + + +#ifdef CONFIG_SATA_DWC_DEBUG +#define dwc_dev_dbg(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_dbg(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_dbg(link, format, arg...) \ + ata_link_printk(link, KERN_INFO, format, ##arg) +#else +#define dwc_dev_dbg(dev, format, arg...) \ + ({ 0; }) +#define dwc_port_dbg(ap, format, arg...) \ + ({ 0; }) +#define dwc_link_dbg(link, format, arg...) \ + ({ 0; }) +#endif + +#ifdef CONFIG_SATA_DWC_VDEBUG +#define DEBUG_NCQ +#define dwc_dev_vdbg(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_vdbg(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_vdbg(link, format, arg...) \ + ata_link_printk(link, KERN_INFO, format, ##arg) +#else +#define dwc_dev_vdbg(dev, format, arg...) \ + ({ 0; }) +#define dwc_port_vdbg(ap, format, arg...) \ + ({ 0; }) +#define dwc_link_vdbg(link, format, arg...) \ + ({ 0; }) +#endif + +#define dwc_dev_info(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_info(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_info(link, format, arg...) 
\ + ata_link_printk(link, KERN_INFO, format, ##arg) + +/* These two are defined in "libata.h" */ +#undef DRV_NAME +#undef DRV_VERSION +#define DRV_NAME "sata-dwc" +#define DRV_VERSION "2.0" + +/* Port Multiplier discovery Signature */ +#define PSCR_SCONTROL_DET_ENABLE 0x00000001 +#define PSCR_SSTATUS_DET_PRESENT 0x00000001 +#define PSCR_SERROR_DIAG_X 0x04000000 + +/* Port multiplier port entry in SCONTROL register */ +#define SCONTROL_PMP_MASK 0x000f0000 +#define PMP_TO_SCONTROL(p) ((p << 16) & 0x000f0000) +#define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16) + + +/* SATA DMA driver Globals */ +#if defined(CONFIG_APM821xx) +#define DMA_NUM_CHANS 2 +#else +#define DMA_NUM_CHANS 1 +#endif + +#define DMA_NUM_CHAN_REGS 8 + +/* SATA DMA Register definitions */ +#if defined(CONFIG_APM821xx) +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ +#else +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ +#endif + +#if defined(CONFIG_APOLLO3G) +extern void signal_hdd_led(int, int); +#endif +struct dmareg { + u32 low; /* Low bits 0-31 */ + u32 high; /* High bits 32-63 */ +}; + +/* DMA Per Channel registers */ + +struct dma_chan_regs { + struct dmareg sar; /* Source Address */ + struct dmareg dar; /* Destination address */ + struct dmareg llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ + struct dmareg sstat; /* Source Status not implemented in core */ + struct dmareg dstat; /* Destination Status not implemented in core */ + struct dmareg sstatar; /* Source Status Address not impl in core */ + struct dmareg dstatar; /* Destination Status Address not implemented */ + struct dmareg cfg; /* Config */ + struct dmareg sgr; /* Source Gather */ + struct dmareg dsr; /* Destination Scatter */ +}; + +/* Generic Interrupt Registers */ +struct dma_interrupt_regs { + struct dmareg tfr; /* Transfer Interrupt */ + struct dmareg block; /* Block Interrupt */ + struct dmareg srctran; /* Source Transfer Interrupt */ + struct dmareg dsttran; /* Dest Transfer Interrupt */ + struct dmareg error; /* Error */ +}; + +struct ahb_dma_regs { + struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; + struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ + struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ + struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ + struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ + struct dmareg statusInt; /* Interrupt combined */ + struct dmareg rq_srcreg; /* Src Trans Req */ + struct dmareg rq_dstreg; /* Dst Trans Req */ + struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */ + struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */ + struct dmareg rq_lst_srcreg; /* Last Src Trans Req */ + struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */ + struct dmareg dma_cfg; /* DMA Config */ + struct dmareg dma_chan_en; /* DMA Channel Enable */ + struct dmareg dma_id; /* DMA ID */ + struct dmareg dma_test; /* DMA Test */ + struct dmareg res1; /* reserved */ + struct dmareg res2; /* reserved */ + + /* DMA Comp Params + * Param 6 = dma_param[0], Param 5 = dma_param[1], + * Param 4 = dma_param[2] ... 
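+ * so, in general, hardware Param N (1 <= N <= 6) lives at
+ * dma_params[6 - N]; a helper to fetch it might look roughly like this
+ * (illustrative sketch only, not part of the original driver):
+ *
+ *	static inline u32 dma_comp_param(struct ahb_dma_regs *regs, int n)
+ *	{
+ *		return in_le32(&regs->dma_params[6 - n].low);
+ *	}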
+ */ + struct dmareg dma_params[6]; +}; + +/* Data structure for linked list item */ +struct lli { + u32 sar; /* Source Address */ + u32 dar; /* Destination address */ + u32 llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ +#if defined(CONFIG_APM821xx) + u32 dstat; /* Destination status (source status not supported) */ +#else + struct dmareg dstat; /* Destination Status */ +#endif +}; + +#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli)) +#define SATA_DWC_DMAC_LLI_NUM 256 +#define SATA_DWC_DMAC_TWIDTH_BYTES 4 +#define SATA_DWC_DMAC_LLI_TBL_SZ \ + (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM) +#if defined(CONFIG_APM821xx) +#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \ + (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES) +#else +#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \ + (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES) +#endif +/* DMA Register Operation Bits */ +#define DMA_EN 0x00000001 /* Enable AHB DMA */ +#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ +#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ + ((0x000000001 << (ch)) << 8)) +#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) + +/* Channel Control Register */ +#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ +#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */ +#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */ +/* + * This define is used to set block chaining disabled in the control low + * register. It is already in little endian format so it can be &'d directly. + * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) + */ +#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7 +#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/ +#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/ +#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/ +#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/ +#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/ +#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/ +#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/ +#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/ +#define DMA_CTL_SINC_DEC 0x00000200 +#define DMA_CTL_SINC_NOCHANGE 0x00000400 +#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/ +#define DMA_CTL_DINC_DEC 0x00000080 +#define DMA_CTL_DINC_NOCHANGE 0x00000100 +#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/ +#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/ +#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/ + +/* Channel Configuration Register high bits */ +#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/ +#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/ + +/* Channel Configuration Register low bits */ +#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/ +#define DMA_CFG_RELD_SRC 0x40000000 +#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/ +#define DMA_CFG_HS_SELDST 0x00000400 +#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/ + +/* Assign hardware handshaking interface (x) to dst / src peripheral */ +#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11) +#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7) + +/* Channel Linked List Pointer Register */ +#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) +#define DMA_LLP_AHBMASTER1 0 /* List Master Select */ +#define DMA_LLP_AHBMASTER2 1 + +#define SATA_DWC_MAX_PORTS 1 + +#define SATA_DWC_SCR_OFFSET 0x24 +#define
SATA_DWC_REG_OFFSET 0x64 + +/* DWC SATA Registers */ +struct sata_dwc_regs { + u32 fptagr; /* 1st party DMA tag */ + u32 fpbor; /* 1st party DMA buffer offset */ + u32 fptcr; /* 1st party DMA Xfr count */ + u32 dmacr; /* DMA Control */ + u32 dbtsr; /* DMA Burst Transac size */ + u32 intpr; /* Interrupt Pending */ + u32 intmr; /* Interrupt Mask */ + u32 errmr; /* Error Mask */ + u32 llcr; /* Link Layer Control */ + u32 phycr; /* PHY Control */ + u32 physr; /* PHY Status */ + u32 rxbistpd; /* Recvd BIST pattern def register */ + u32 rxbistpd1; /* Recvd BIST data dword1 */ + u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ + u32 txbistpd; /* Trans BIST pattern def register */ + u32 txbistpd1; /* Trans BIST data dword1 */ + u32 txbistpd2; /* Trans BIST data dword2 */ + u32 bistcr; /* BIST Control Register */ + u32 bistfctr; /* BIST FIS Count Register */ + u32 bistsr; /* BIST Status Register */ + u32 bistdecr; /* BIST Dword Error count register */ + u32 res[15]; /* Reserved locations */ + u32 testr; /* Test Register */ + u32 versionr; /* Version Register */ + u32 idr; /* ID Register */ + u32 unimpl[192]; /* Unimplemented */ + u32 dmadr[256]; /* FIFO Locations in DMA Mode */ +}; + +#define SCR_SCONTROL_DET_ENABLE 0x00000001 +#define SCR_SSTATUS_DET_PRESENT 0x00000001 +#define SCR_SERROR_DIAG_X 0x04000000 + +/* DWC SATA Register Operations */ +#define SATA_DWC_TXFIFO_DEPTH 0x01FF +#define SATA_DWC_RXFIFO_DEPTH 0x01FF + +#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004 +#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN + +#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \ + SATA_DWC_TXFIFO_DEPTH) +#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \ + SATA_DWC_RXFIFO_DEPTH) << 16) + +// SATA DWC Interrupts +#define SATA_DWC_INTPR_DMAT 0x00000001 +#define SATA_DWC_INTPR_NEWFP 0x00000002 +#define SATA_DWC_INTPR_PMABRT 0x00000004 +#define SATA_DWC_INTPR_ERR 0x00000008 +#define SATA_DWC_INTPR_NEWBIST 0x00000010 +#define SATA_DWC_INTPR_IPF 0x80000000 +// Interrupt masks +#define SATA_DWC_INTMR_DMATM 0x00000001 +#define SATA_DWC_INTMR_NEWFPM 0x00000002 +#define SATA_DWC_INTMR_PMABRTM 0x00000004 +#define SATA_DWC_INTMR_ERRM 0x00000008 +#define SATA_DWC_INTMR_NEWBISTM 0x00000010 +#define SATA_DWC_INTMR_PRIMERRM 0x00000020 +#define SATA_DWC_INTPR_CMDGOOD 0x00000080 +#define SATA_DWC_INTPR_CMDABORT 0x00000040 + +#define SATA_DWC_LLCR_SCRAMEN 0x00000001 +#define SATA_DWC_LLCR_DESCRAMEN 0x00000002 +#define SATA_DWC_LLCR_RPDEN 0x00000004 + +// Defines for SError register +#define SATA_DWC_SERR_ERRI 0x00000001 // Recovered data integrity error +#define SATA_DWC_SERR_ERRM 0x00000002 // Recovered communication error +#define SATA_DWC_SERR_ERRT 0x00000100 // Non-recovered transient data integrity error +#define SATA_DWC_SERR_ERRC 0x00000200 // Non-recovered persistent communication or data integrity error +#define SATA_DWC_SERR_ERRP 0x00000400 // Protocol error +#define SATA_DWC_SERR_ERRE 0x00000800 // Internal host adapter error +#define SATA_DWC_SERR_DIAGN 0x00010000 // PHYRdy change +#define SATA_DWC_SERR_DIAGI 0x00020000 // PHY internal error +#define SATA_DWC_SERR_DIAGW 0x00040000 // Phy COMWAKE signal is detected +#define SATA_DWC_SERR_DIAGB 0x00080000 
// 10b to 8b decoder err +#define SATA_DWC_SERR_DIAGT 0x00100000 // Disparity error +#define SATA_DWC_SERR_DIAGC 0x00200000 // CRC error +#define SATA_DWC_SERR_DIAGH 0x00400000 // Handshake error +#define SATA_DWC_SERR_DIAGL 0x00800000 // Link sequence (illegal transition) error +#define SATA_DWC_SERR_DIAGS 0x01000000 // Transport state transition error +#define SATA_DWC_SERR_DIAGF 0x02000000 // Unrecognized FIS type +#define SATA_DWC_SERR_DIAGX 0x04000000 // Exchanged error - Set when PHY COMINIT signal is detected. +#define SATA_DWC_SERR_DIAGA 0x08000000 // Port Selector Presence detected + +/* This is all error bits, zero's are reserved fields. */ +#define SATA_DWC_SERR_ERR_BITS 0x0FFF0F03 + +#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F) + +struct sata_dwc_device { + struct resource reg; /* Resource for register */ + struct device *dev; /* generic device struct */ + struct ata_probe_ent *pe; /* ptr to probe-ent */ + struct ata_host *host; + u8 *reg_base; + struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ + u8 *scr_base; + int dma_channel; /* DWC SATA DMA channel */ + int irq_dma; + struct timer_list an_timer; +}; + +#define SATA_DWC_QCMD_MAX 32 + +struct sata_dwc_device_port { + struct sata_dwc_device *hsdev; + int cmd_issued[SATA_DWC_QCMD_MAX]; + struct lli *llit[SATA_DWC_QCMD_MAX]; + dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; + u32 dma_chan[SATA_DWC_QCMD_MAX]; + int dma_pending[SATA_DWC_QCMD_MAX]; + u32 sata_dwc_sactive_issued; /* issued queued ops */ + u32 sata_dwc_sactive_queued; /* queued ops */ + u32 dma_interrupt_count; + +}; + +static struct sata_dwc_device* dwc_dev_list[2]; +static int dma_intr_registered = 0; +/* + * Commonly used DWC SATA driver Macros + */ +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \ + (host)->private_data) +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \ + (ap)->host->private_data) +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \ + (ap)->private_data) +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \ + (qc)->ap->host->private_data) +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \ + (hsdevp)->hsdev) + +enum { + SATA_DWC_CMD_ISSUED_NOT = 0, + SATA_DWC_CMD_ISSUED_PENDING = 1, + SATA_DWC_CMD_ISSUED_EXEC = 2, + SATA_DWC_CMD_ISSUED_NODATA = 3, + + SATA_DWC_DMA_PENDING_NONE = 0, + SATA_DWC_DMA_PENDING_TX = 1, + SATA_DWC_DMA_PENDING_RX = 2, +}; + +/* + * Globals + */ +static struct ahb_dma_regs *sata_dma_regs = 0; + +/* + * Prototypes + */ +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status); +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); +static void sata_dwc_port_stop(struct ata_port *ap); +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); + +static int dma_dwc_init(struct sata_dwc_device *hsdev); +static void dma_dwc_exit(struct sata_dwc_device *hsdev); +static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr); +static void dma_dwc_xfer_start(int dma_ch); +static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch); +static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev); +static void sata_dwc_init_port ( struct ata_port *ap ); +u8 sata_dwc_check_status(struct ata_port *ap); + + + + +static const char *dir_2_txt(enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + return "bi"; + case 
DMA_FROM_DEVICE: + return "from"; + case DMA_TO_DEVICE: + return "to"; + case DMA_NONE: + return "none"; + default: + return "err"; + } +} + +static const char *prot_2_txt(enum ata_tf_protocols protocol) +{ + switch (protocol) { + case ATA_PROT_UNKNOWN: + return "unknown"; + case ATA_PROT_NODATA: + return "nodata"; + case ATA_PROT_PIO: + return "pio"; + case ATA_PROT_DMA: + return "dma"; + case ATA_PROT_NCQ: + return "ncq"; + case ATAPI_PROT_PIO: + return "atapi pio"; + case ATAPI_PROT_NODATA: + return "atapi nodata"; + case ATAPI_PROT_DMA: + return "atapi dma"; + default: + return "err"; + } +} + +inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf) +{ + switch (tf->command) { + case ATA_CMD_CHK_POWER: + return "ATA_CMD_CHK_POWER"; + case ATA_CMD_EDD: + return "ATA_CMD_EDD"; + case ATA_CMD_FLUSH: + return "ATA_CMD_FLUSH"; + case ATA_CMD_FLUSH_EXT: + return "ATA_CMD_FLUSH_EXT"; + case ATA_CMD_ID_ATA: + return "ATA_CMD_ID_ATA"; + case ATA_CMD_ID_ATAPI: + return "ATA_CMD_ID_ATAPI"; + case ATA_CMD_FPDMA_READ: + return "ATA_CMD_FPDMA_READ"; + case ATA_CMD_FPDMA_WRITE: + return "ATA_CMD_FPDMA_WRITE"; + case ATA_CMD_READ: + return "ATA_CMD_READ"; + case ATA_CMD_READ_EXT: + return "ATA_CMD_READ_EXT"; + case ATA_CMD_READ_NATIVE_MAX_EXT : + return "ATA_CMD_READ_NATIVE_MAX_EXT"; + case ATA_CMD_VERIFY_EXT : + return "ATA_CMD_VERIFY_EXT"; + case ATA_CMD_WRITE: + return "ATA_CMD_WRITE"; + case ATA_CMD_WRITE_EXT: + return "ATA_CMD_WRITE_EXT"; + case ATA_CMD_PIO_READ: + return "ATA_CMD_PIO_READ"; + case ATA_CMD_PIO_READ_EXT: + return "ATA_CMD_PIO_READ_EXT"; + case ATA_CMD_PIO_WRITE: + return "ATA_CMD_PIO_WRITE"; + case ATA_CMD_PIO_WRITE_EXT: + return "ATA_CMD_PIO_WRITE_EXT"; + case ATA_CMD_SET_FEATURES: + return "ATA_CMD_SET_FEATURES"; + case ATA_CMD_PACKET: + return "ATA_CMD_PACKET"; + case ATA_CMD_PMP_READ: + return "ATA_CMD_PMP_READ"; + case ATA_CMD_PMP_WRITE: + return "ATA_CMD_PMP_WRITE"; + default: + return "ATA_CMD_???"; + } +} + +/* + * Dump content of the taskfile + */ +static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf) +{ + dwc_dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx" + "device: %x\n", tf->command, prot_2_txt(tf->protocol), + tf->flags, tf->device); + dwc_dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:" + "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah); + dwc_dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x " + "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature, + tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, + tf->hob_lbah); +} + +/* + * Function: get_burst_length_encode + * arguments: datalength: length in bytes of data + * returns value to be programmed in register corresponding to data length + * This value is effectively the log(base 2) of the length + */ +static inline int get_burst_length_encode(int datalength) +{ + int items = datalength >> 2; /* div by 4 to get lword count */ + + if (items >= 64) + return 5; + + if (items >= 32) + return 4; + + if (items >= 16) + return 3; + + if (items >= 8) + return 2; + + if (items >= 4) + return 1; + + return 0; +} + +/* + * Clear Interrupts on a DMA channel + */ +static inline void clear_chan_interrupts(int c) +{ + out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c)); + 
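+ /*
+  * These interrupt_clear registers are write-1-to-clear: setting a
+  * channel's bit acks only that channel's pending interrupt (standard
+  * DW AHB DMAC behaviour), so clearing one channel never disturbs the
+  * state of the other.
+  */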
out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c)); +} + +/* + * Function: dma_request_channel + * arguments: ata_port *ap + * returns channel number if available else -1 + * This function assigns the next available DMA channel from the list to the + * requester + */ +static int dma_request_channel(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) { + dwc_port_vdbg(ap, "%s Successfully requested DMA channel %d\n", + __func__, hsdev->dma_channel); + return (hsdev->dma_channel); + } + + return -1; +} + + + +/* + * Function: dma_dwc_interrupt + * arguments: irq, dev_id, pt_regs + * returns IRQ_HANDLED after servicing the interrupt + * Interrupt Handler for DW AHB SATA DMA + */ +static int dma_dwc_interrupt(int irq, void *hsdev_instance) +{ + volatile u32 tfr_reg, err_reg; + unsigned long flags; + struct sata_dwc_device *hsdev = hsdev_instance; + struct ata_host *host = (struct ata_host *)hsdev->host; + struct ata_port *ap; + struct sata_dwc_device_port *hsdevp; + u8 tag = 0; + int chan; + unsigned int port = 0; + spin_lock_irqsave(&host->lock, flags); + + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + tag = ap->link.active_tag; + + dwc_port_vdbg(ap, "%s: DMA interrupt in channel %d\n", __func__, hsdev->dma_channel); + + tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low)); + err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low)); + + dwc_port_vdbg(ap, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", + tfr_reg, err_reg, hsdevp->dma_pending[tag], port); + chan = hsdev->dma_channel; + + if (tfr_reg & DMA_CHANNEL(chan)) { + /* + * Each DMA command produces 2 interrupts. Only + * complete the command after both interrupts have been + * seen. (See sata_dwc_isr()) + */ + hsdevp->dma_interrupt_count++; + sata_dwc_clear_dmacr(hsdevp, tag); + + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "DMA not pending eot=0x%08x " + "err=0x%08x tag=0x%02x pending=%d\n", + tfr_reg, err_reg, tag, + hsdevp->dma_pending[tag]); + } + + // Do remaining jobs after the DMA transfer completes + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + + /* Clear the interrupt */ + out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), + DMA_CHANNEL(chan)); + } + + /* Process error interrupt. */ + // We do not expect errors to happen + if (unlikely(err_reg & DMA_CHANNEL(chan))) { + /* TODO Need error handler ! */ + dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", + err_reg); + + spin_lock_irqsave(ap->lock, flags); + //if (ata_is_dma(qc->tf.protocol)) { + /* disable DMAC */ + dma_dwc_terminate_dma(ap, chan); + //} + spin_unlock_irqrestore(ap->lock, flags); + + /* Clear the interrupt. */ + out_le32(&(sata_dma_regs->interrupt_clear.error.low), + DMA_CHANNEL(chan)); + } + + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance) +{ + volatile u32 tfr_reg, err_reg; + int chan; + + tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low)); + err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low)); + + for (chan = 0; chan < DMA_NUM_CHANS; chan++) { + /* Check for end-of-transfer interrupt. */ + + if (tfr_reg & DMA_CHANNEL(chan)) { + dma_dwc_interrupt(0, dwc_dev_list[chan]); + } + else + + /* Check for error interrupt.
*/ + if (err_reg & DMA_CHANNEL(chan)) { + dma_dwc_interrupt(0, dwc_dev_list[chan]); + } + } + + return IRQ_HANDLED; +} + +static int dma_register_interrupt (struct sata_dwc_device *hsdev) +{ + int retval = 0; + int irq = hsdev->irq_dma; + /* + * FIXME: 2 SATA controllers share the same DMA engine so + * currently, they also share same DMA interrupt + */ + if (!dma_intr_registered) { + printk("%s register irq (%d)\n", __func__, irq); + retval = request_irq(irq, dma_dwc_handler, IRQF_SHARED, "SATA DMA", hsdev); + //retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL); + if (retval) { + dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq); + return -ENODEV; + } + //dma_intr_registered = 1; + } + return retval; +} + +/* + * Function: dma_request_interrupts + * arguments: hsdev + * returns status + * This function registers ISR for a particular DMA channel interrupt + */ +static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) +{ + int retval = 0; + int dma_chan = hsdev->dma_channel; + + /* Unmask error interrupt */ + out_le32(&sata_dma_regs->interrupt_mask.error.low, + in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(dma_chan)); + + /* Unmask end-of-transfer interrupt */ + out_le32(&sata_dma_regs->interrupt_mask.tfr.low, + in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(dma_chan)); + + dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.error=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.error.low)); + dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.tfr=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.tfr.low)); +#if 0 + out_le32(&sata_dma_regs->interrupt_mask.block.low, + DMA_ENABLE_CHAN(dma_chan)); + + out_le32(&sata_dma_regs->interrupt_mask.srctran.low, + DMA_ENABLE_CHAN(dma_chan)); + + out_le32(&sata_dma_regs->interrupt_mask.dsttran.low, + DMA_ENABLE_CHAN(dma_chan)); +#endif + return retval; +} + +/* + * Function: map_sg_to_lli + * arguments: sg: scatter/gather list(sg) + * num_elems: no of elements in sg list + * dma_lli: LLI table + * dest: destination address + * read: whether the transfer is read or write + * returns the number of AHB DMA Linked List Items created + * This function creates a list of LLIs for DMA Xfr and returns the number + * of elements in the DMA linked list. + * + * Note that the Synopsys driver has a comment proposing that better performance + * is possible by only enabling interrupts on the last item in the linked list. + * However, it seems that could be a problem if an error happened on one of the + * first items. The transfer would halt, but no error interrupt would occur. + * + * Currently this function sets interrupts enabled for each linked list item: + * DMA_CTL_INT_EN.
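+ *
+ * As a worked illustration of the splitting rules below (the numbers
+ * are made up): a single 20000-byte sg entry at a 4-byte-aligned
+ * address is capped to the 8192-byte transfer-size/FIS limit, giving
+ * LLI items of 8192 + 8192 + 3616 bytes; each item's block-transfer
+ * size is then programmed in 4-byte words, e.g.
+ * DMA_CTL_BLK_TS(8192 / 4) = 2048.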
+ */ +static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli, + dma_addr_t dma_lli, void __iomem *dmadr_addr) +{ + struct scatterlist *sg = qc->sg; + struct device *dwc_dev = qc->ap->dev; + int num_elems = qc->n_elem; + int dir = qc->dma_dir; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap); + + int i, idx = 0; + int fis_len = 0; + dma_addr_t next_llp; + int bl; + unsigned int dma_ts = 0; + + dwc_port_vdbg(qc->ap, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x " + "dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, + (u32)dmadr_addr); + + bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); + + for (i = 0; i < num_elems; i++, sg++) { + u32 addr, offset; + u32 sg_len, len; + + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + dwc_port_vdbg(qc->ap, "%s: elem=%d sg_addr=0x%x sg_len=%d\n", + __func__, i, addr, sg_len); + + while (sg_len) { + + if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) { + /* The LLI table is not large enough. */ + dev_err(dwc_dev, "LLI table overrun (idx=%d)\n", + idx); + break; + } + len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? + SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; + + offset = addr & 0xffff; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + /* + * Make sure a LLI block is not created that will span a + * 8K max FIS boundary. If the block spans such a FIS + * boundary, there is a chance that a DMA burst will + * cross that boundary -- this results in an error in + * the host controller. + */ + if (unlikely(fis_len + len > 8192)) { + dwc_port_vdbg(qc->ap, "SPLITTING: fis_len=%d(0x%x) " + "len=%d(0x%x)\n", fis_len, fis_len, + len, len); + len = 8192 - fis_len; + fis_len = 0; + } else { + fis_len += len; + } + if (fis_len == 8192) + fis_len = 0; + + /* + * Set DMA addresses and lower half of control register + * based on direction. 
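+ * For DMA_FROM_DEVICE the SATA FIFO (dmadr) is the source and system
+ * memory the destination; for DMA_TO_DEVICE the roles are swapped.
+ * The SMS/DMS master-select values differ per DMA channel, apparently
+ * because each channel is wired to a different AHB master on this SoC.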
+ */ + dwc_port_vdbg(qc->ap, "%s: sg_len = %d, len = %d\n", __func__, sg_len, len); + +#if defined(CONFIG_APM821xx) + if (dir == DMA_FROM_DEVICE) { + lli[idx].dar = cpu_to_le32(addr); + lli[idx].sar = cpu_to_le32((u32)dmadr_addr); + if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(1) | /* Source: Master 2 */ + DMA_CTL_DMS(0) | /* Dest: Master 1 */ + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(2) | /* Source: Master 3 */ + DMA_CTL_DMS(0) | /* Dest: Master 1 */ + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } + } else { /* DMA_TO_DEVICE */ + lli[idx].sar = cpu_to_le32(addr); + lli[idx].dar = cpu_to_le32((u32)dmadr_addr); + if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(1) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(2) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } + } +#else + if (dir == DMA_FROM_DEVICE) { + lli[idx].dar = cpu_to_le32(addr); + lli[idx].sar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(1) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else { /* DMA_TO_DEVICE */ + lli[idx].sar = cpu_to_le32(addr); + lli[idx].dar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(1) | + DMA_CTL_DMS(0) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } +#endif + dwc_port_vdbg(qc->ap, "%s setting ctl.high len: 0x%08x val: " + "0x%08x\n", __func__, len, + DMA_CTL_BLK_TS(len / 4)); + + /* Program the LLI CTL high register */ + dma_ts = DMA_CTL_BLK_TS(len / 4); + lli[idx].ctl.high = cpu_to_le32(dma_ts); + + /* + *Program the next pointer. The next pointer must be + * the physical address, not the virtual address. + */ + next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli))); + + /* The last 2 bits encode the list master select. 
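+ * For example (illustrative numbers), DMA_LLP_LMS(0x0f001000,
+ * DMA_LLP_AHBMASTER2) masks the pointer with 0xfffffffc and ORs in
+ * master-select value 1, yielding 0x0f001001.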
*/ +#if defined(CONFIG_APM821xx) + next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1); +#else + next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); +#endif + + lli[idx].llp = cpu_to_le32(next_llp); + + dwc_port_vdbg(qc->ap, "%s: index %d\n", __func__, idx); + dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx].ctl.high); + dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx].ctl.low); + dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx].dar); + dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx].sar); + dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx].llp); + + idx++; + sg_len -= len; + addr += len; + } + } + + /* + * The last next ptr has to be zero and the last control low register + * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source + * and destination enable) set back to 0 (disabled.) This is what tells + * the core that this is the last item in the linked list. + */ + if (likely(idx)) { + lli[idx-1].llp = 0x00000000; + lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; + + /* Flush cache to memory */ + dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), + DMA_BIDIRECTIONAL); + } + + dwc_port_vdbg(qc->ap, "%s: Final index %d\n", __func__, idx-1); + dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx-1].ctl.high); + dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx-1].ctl.low); + dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx-1].dar); + dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx-1].sar); + dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx-1].llp); + + return idx; +} + +/* + * Function: dma_dwc_xfer_start + * arguments: Channel number + * Return : None + * Enables the DMA channel + */ +static void dma_dwc_xfer_start(int dma_ch) +{ + /* Enable the DMA channel */ + out_le32(&(sata_dma_regs->dma_chan_en.low), + in_le32(&(sata_dma_regs->dma_chan_en.low)) | + DMA_ENABLE_CHAN(dma_ch)); + +#if defined(CONFIG_SATA_DWC_VDEBUG) + printk("DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low))); + printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n", + __func__, in_le32(&(sata_dma_regs->dma_chan_en.low))); +#endif + + +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(1 /*blink=yes*/, 2 /* _3G_LED_GREEN */); +#endif + +} + +/* + * Check if the selected DMA channel is currently enabled. 
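+ * dma_chan_en.low keeps one CH_EN bit per channel; the CH_EN_WE
+ * write-enable bits in [15:8] matter only on writes, which is why
+ * DMA_ENABLE_CHAN()/DMA_DISABLE_CHAN() set them as well.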
+ */ +static int dma_dwc_channel_enabled(int ch) +{ + u32 dma_chan; + + // Read the DMA channel register + dma_chan = in_le32(&(sata_dma_regs->dma_chan_en.low)); + if (dma_chan & DMA_CHANNEL(ch)) + return 1; + + return 0; +} + +/* + * Terminate the current DMA transaction + */ +static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch) +{ + int enabled = dma_dwc_channel_enabled(dma_ch); + + dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n", + __func__, dma_ch, enabled); + + if (enabled) { + // Disable the selected channel + out_le32(&(sata_dma_regs->dma_chan_en.low), + in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_DISABLE_CHAN(dma_ch)); + + // Wait until the channel is disabled + do { + enabled = dma_dwc_channel_enabled(dma_ch); + msleep(10); + } while (enabled); + } +} + + +/* + * Setup data and DMA configuration ready for DMA transfer + */ +static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr) +{ + int dma_ch; + int num_lli; + + /* Acquire DMA channel */ + dma_ch = dma_request_channel(qc->ap); + if (unlikely(dma_ch == -1)) { + dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__); + return -EAGAIN; + } + dwc_port_vdbg(qc->ap, "%s: Got channel %d\n", __func__, dma_ch); + + /* Convert SG list to linked list of items (LLIs) for AHB DMA */ + num_lli = map_sg_to_lli(qc, lli, dma_lli, addr); + + dwc_port_vdbg(qc->ap, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%08x addr:" + " %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli, + (u32)dma_lli, addr, num_lli); + + /* Clear channel interrupts */ + clear_chan_interrupts(dma_ch); + + /* Program the CFG register. */ +#if defined(CONFIG_APM821xx) + if (dma_ch == 0) { + /* Buffer mode enabled, FIFO_MODE=0 */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000009); + /* Channel 0 bit[7:5] */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020); + } else if (dma_ch == 1) { + /* Buffer mode enabled, FIFO_MODE=0 */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d); + /* Channel 1 bit[7:5] */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020); + } +#else + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), + DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0); +#endif + + /* Program the address of the linked list */ +#if defined(CONFIG_APM821xx) + out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low), + DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1)); +#else + out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low), + DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); +#endif + + /* Program the CTL register with src enable / dst enable */ + //out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), + // DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); + out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), 0x18000000); + + dwc_port_vdbg(qc->ap, "%s DMA channel %d is ready\n", __func__, dma_ch); + dwc_port_vdbg(qc->ap, "%s setting cfg.high of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high))); + dwc_port_vdbg(qc->ap, "%s setting cfg.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low))); + dwc_port_vdbg(qc->ap, "%s setting llp.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low))); + dwc_port_vdbg(qc->ap, "%s setting ctl.low of channel %d with val: 0x%08x\n", __func__, dma_ch,
in_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low))); + + return dma_ch; +} + +/* + * Function: dma_dwc_exit + * arguments: hsdev + * returns none + * This function exits the SATA DMA driver + */ +static void dma_dwc_exit(struct sata_dwc_device *hsdev) +{ + dwc_dev_vdbg(hsdev->dev, "%s:\n", __func__); + if (sata_dma_regs) + iounmap(sata_dma_regs); + + if (hsdev->irq_dma) + free_irq(hsdev->irq_dma, hsdev); +} + +/* + * Function: dma_dwc_init + * arguments: hsdev + * returns status + * This function initializes the SATA DMA driver + */ +static int dma_dwc_init(struct sata_dwc_device *hsdev) +{ + int err; + int irq = hsdev->irq_dma; + + err = dma_request_interrupts(hsdev, irq); + if (err) { + dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n", + __func__, err); + goto error_out; + } + + /* Enable DMA */ + out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN); + + dev_notice(hsdev->dev, "DMA initialized\n"); + dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low))); + dwc_dev_vdbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs); + + return 0; + +error_out: + dma_dwc_exit(hsdev); + + return err; +} + + +static void sata_dwc_dev_config(struct ata_device *adev) +{ + /* + * Does not support NCQ over a port multiplier + * (no FIS-based switching). + */ + if (adev->flags & ATA_DFLAG_NCQ) { + /* + * TODO: debug why enabling NCQ makes Linux crash on + * hot plug after the first hot unplug action. + * --> need to investigate more + */ + adev->flags &= ~ATA_DFLAG_NCQ; + if (sata_pmp_attached(adev->link->ap)) { + adev->flags &= ~ATA_DFLAG_NCQ; + ata_dev_printk(adev, KERN_INFO, + "NCQ disabled for command-based switching\n"); + } + } + + /* + * The sata_pmp_error_handler function in libata-pmp disables + * ATA_DFLAG_AN the first time the SATA port is configured, so + * asynchronous notification is not configured. + * Enable the AN feature manually here.
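+ * (This controller appears to have no dedicated AN interrupt; see
+ * sata_dwc_an_chk(), which polls SCR_NOTIFICATION from a timer and
+ * feeds sata_async_notification() when it sees a change.)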
+ */ + adev->flags |= ATA_DFLAG_AN; +} + + +static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + if (unlikely(scr > SCR_NOTIFICATION)) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + + *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); + dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", + __func__, link->ap->print_id, scr, *val); + + return 0; +} + +static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", + __func__, link->ap->print_id, scr, val); + if (unlikely(scr > SCR_NOTIFICATION)) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); + + return 0; +} + +static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + return in_le32((void __iomem *)hsdev->scr_base + (scr * 4)); +} + + +static inline void sata_dwc_core_scr_write ( struct ata_port *ap, unsigned int scr, u32 val) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + out_le32((void __iomem *)hsdev->scr_base + (scr * 4), val); +} + +static inline void clear_serror(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + out_le32( (void __iomem *)hsdev->scr_base + 4, + in_le32((void __iomem *)hsdev->scr_base + 4)); +} + +static inline void clear_intpr(struct sata_dwc_device *hsdev) +{ + out_le32(&hsdev->sata_dwc_regs->intpr, + in_le32(&hsdev->sata_dwc_regs->intpr)); +} + +static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) +{ + out_le32(&hsdev->sata_dwc_regs->intpr, bit); + // in_le32(&hsdev->sata_dwc_regs->intpr)); +} + + +static inline void enable_err_irq(struct sata_dwc_device *hsdev) +{ + out_le32(&hsdev->sata_dwc_regs->intmr, + in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM); + out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS); +} + +static inline u32 qcmd_tag_to_mask(u8 tag) +{ + return 0x00000001 << (tag & 0x1f); +} + + +/* + * Timer to monitor SCR_NOTIFICATION registers on the + * SATA port + */ +static void sata_dwc_an_chk(unsigned long arg) +{ + struct ata_port *ap = (void *)arg; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + unsigned long flags; + int rc = 0x0; + u32 sntf = 0x0; + + spin_lock_irqsave(ap->lock, flags); + rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); + + // If anything changed in SCR_NOTIFICATION (SCR4), signal asynchronous notification + if ( (rc == 0) & (sntf != 0)) { + dwc_port_dbg(ap, "Call asynchronous notification sntf=0x%08x\n", sntf); + sata_async_notification(ap); + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(8000); + } else { + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(3000); + } + add_timer(&hsdev->an_timer); + spin_unlock_irqrestore(ap->lock, flags); +} + + +/* + * sata_dwc_pmp_select - Set the PMP field in SControl to the specified port number. + * + * @port: The value (port number) to set the PMP field to. + * + * @return: The old value of the PMP field.
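+ *
+ * For example (illustrative values): selecting port 3 clears
+ * SCONTROL_PMP_MASK (0x000f0000) and ORs in PMP_TO_SCONTROL(3) =
+ * 0x00030000, so a SControl value of 0x00000301 becomes 0x00030301.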
+ */ +static u32 sata_dwc_pmp_select(struct ata_port *ap, u32 port) +{ + u32 scontrol, old_port; + if (sata_pmp_supported(ap)) { + scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL); + old_port = SCONTROL_TO_PMP(scontrol); + + // Select new PMP port + if ( port != old_port ) { + scontrol &= ~SCONTROL_PMP_MASK; + sata_dwc_core_scr_write(ap, SCR_CONTROL, scontrol | PMP_TO_SCONTROL(port)); + dwc_port_dbg(ap, "%s: old port=%d new port=%d\n", __func__, old_port, port); + } + return old_port; + } + else + return port; +} + +/* + * Get the current PMP port + */ +static inline u32 current_pmp(struct ata_port *ap) +{ + return SCONTROL_TO_PMP(sata_dwc_core_scr_read(ap, SCR_CONTROL)); +} + + +/* + * Process when a PMP card is attached to the SATA port. + * Since our SATA port supports command-based switching only, + * NCQ will not be available. + * We disable the NCQ feature on the SATA port. + */ +static void sata_dwc_pmp_attach ( struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + dev_info(ap->dev, "Attach SATA port multiplier with %d ports\n", ap->nr_pmp_links); + // Disable NCQ + ap->flags &= ~ATA_FLAG_NCQ; + + // Initialize timer for checking AN + init_timer(&hsdev->an_timer); + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(20000); + hsdev->an_timer.function = sata_dwc_an_chk; + hsdev->an_timer.data = (unsigned long)(ap); + add_timer(&hsdev->an_timer); +} + +/* + * Process when the PMP card is removed from the SATA port. + * Re-enable NCQ for use by the SATA drive in the future + */ +static void sata_dwc_pmp_detach ( struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + dev_info(ap->dev, "Detach SATA port\n"); + // Re-enable NCQ + // TODO: re-enable the line below once the NCQ problem is fixed + //ap->flags |= ATA_FLAG_NCQ; + + sata_dwc_pmp_select(ap, 0); + + // Delete timer since PMP card is detached + del_timer(&hsdev->an_timer); +} + + + +// Check whether the link is ready +int sata_dwc_check_ready ( struct ata_link *link ) { + u8 status; + struct ata_port *ap = link->ap; + status = ioread8(ap->ioaddr.status_addr); + return ata_check_ready(status); +} + + +/* + * Do a soft reset on the current SATA link.
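+ * The sequence below is the standard SFF SRST dance: select the PMP
+ * port, assert ATA_SRST in the device control register, wait briefly,
+ * deassert it, wait for link readiness, then classify the device from
+ * its signature taskfile.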
+ */ +static int sata_dwc_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc; + struct ata_port *ap = link->ap; + struct ata_ioports *ioaddr = &ap->ioaddr; + struct ata_taskfile tf; + + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + + /* Issue bus reset */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + + /* Always check readiness of the master device */ + rc = ata_wait_after_reset(link, deadline, sata_dwc_check_ready); + + // Classify the ata_port + *classes = ATA_DEV_NONE; + /* Verify if SStatus indicates device presence */ + if (ata_link_online(link)) { + memset(&tf, 0, sizeof(tf)); + ata_sff_tf_read(ap, &tf); + *classes = ata_dev_classify(&tf); + } + + if ( *classes == ATA_DEV_PMP) + dwc_link_dbg(link, "-->found PMP device by sig\n"); + + clear_serror(link->ap); + + return rc; +} + + + + +/* + * sata_dwc_hardreset - Do hardreset the SATA controller + */ +static int sata_dwc_hardreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc; + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + bool online; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); + + dwc_link_dbg(link, "%s\n", __func__); + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + dwc_port_vdbg(link->ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + // Call standard hard reset + rc = sata_link_hardreset(link, timing, deadline, &online, NULL); + + // Reconfigure the port after hard reset + if ( ata_link_online(link) ) + sata_dwc_init_port(link->ap); + + return online ? -EAGAIN : rc; +} + +/* + * Do hard reset on each PMP link + */ +static int sata_dwc_pmp_hardreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc = 0; + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + rc = sata_std_hardreset(link, classes, deadline); + return rc; +} + +/* See ahci.c */ +/* + * Process error when the SATAn_INTPR's ERR bit is set + * The processing is based on SCR_ERROR register content + */ +static void sata_dwc_error_intr(struct ata_port *ap, + struct sata_dwc_device *hsdev, uint intpr) +{ + struct ata_eh_info *ehi; + struct ata_link *link; + struct ata_queued_cmd *active_qc = NULL; + u32 serror; + bool freeze = false, abort = false; + int pmp, ret; + unsigned int err_mask = 0, action = 0; +#if defined(CONFIG_SATA_DWC_VDEBUG) + int dma_chan = hsdev->dma_channel; +#endif + + link = &ap->link; + ehi = &link->eh_info; + + /* Record irq stat */ + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr); + + // Record SERROR + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + dwc_port_dbg(ap, "%s serror = 0x%08x\n", __func__, serror); + + // Clear SERROR and interrupt bit + clear_serror(ap); + clear_intpr(hsdev); + + // Print out for test only + if ( serror ) { + dwc_port_info(ap, "Detect errors:"); + if ( serror & SATA_DWC_SERR_ERRI ) + printk(" ERRI"); + if ( serror & SATA_DWC_SERR_ERRM ) + printk(" ERRM"); + if ( serror & SATA_DWC_SERR_ERRT ) + printk(" ERRT"); + if ( serror & SATA_DWC_SERR_ERRC ) + printk(" ERRC"); + if ( serror & SATA_DWC_SERR_ERRP ) + printk(" ERRP"); + if ( serror & SATA_DWC_SERR_ERRE ) + printk(" ERRE"); + if ( serror & SATA_DWC_SERR_DIAGN ) + printk(" DIAGN"); + if ( serror & SATA_DWC_SERR_DIAGI ) + printk(" DIAGI"); + if ( serror & SATA_DWC_SERR_DIAGW ) + 
printk(" DIAGW"); + if ( serror & SATA_DWC_SERR_DIAGB ) + printk(" DIAGB"); + if ( serror & SATA_DWC_SERR_DIAGT ) + printk(" DIAGT"); + if ( serror & SATA_DWC_SERR_DIAGC ) + printk(" DIAGC"); + if ( serror & SATA_DWC_SERR_DIAGH ) + printk(" DIAGH"); + if ( serror & SATA_DWC_SERR_DIAGL ) + printk(" DIAGL"); + if ( serror & SATA_DWC_SERR_DIAGS ) + printk(" DIAGS"); + if ( serror & SATA_DWC_SERR_DIAGF ) + printk(" DIAGF"); + if ( serror & SATA_DWC_SERR_DIAGX ) + printk(" DIAGX"); + if ( serror & SATA_DWC_SERR_DIAGA ) + printk(" DIAGA"); + printk("\n"); + } + +#if defined(CONFIG_SATA_DWC_VDEBUG) + printk("%s reading cfg.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.high))); + printk("%s reading cfg.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.low))); + printk("%s reading llp.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].llp.low))); + printk("%s reading ctl.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.low))); + printk("%s reading sar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.low))); + printk("%s reading sar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.high))); + printk("%s reading dar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.low))); + printk("%s reading dar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.high))); +#endif + + // Process hotplug for SATA port + if ( serror & (SATA_DWC_SERR_DIAGX | SATA_DWC_SERR_DIAGW)) { + dwc_port_info(ap, "Detect hot plug signal\n"); + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, serror & SATA_DWC_SERR_DIAGN ? "PHY RDY changed" : "device exchanged"); + freeze = true; + } + + // Process PHY internal error / Link sequence (illegal transition) error + if ( serror & (SATA_DWC_SERR_DIAGI | SATA_DWC_SERR_DIAGL)) { + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process Internal host adapter error + if ( serror & SATA_DWC_SERR_ERRE ) { + dev_err(ap->dev, "Detect Internal host adapter error\n"); + // --> need to review + ehi->err_mask |= AC_ERR_HOST_BUS; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process Protocol Error + if ( serror & SATA_DWC_SERR_ERRP ) { + dev_err(ap->dev, "Detect Protocol error\n"); + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process non-recovered persistent communication error + if ( serror & SATA_DWC_SERR_ERRC ) { + dev_err(ap->dev, "Detect non-recovered persistent communication error\n"); + // --> TODO: review processing error + ehi->err_mask |= AC_ERR_ATA_BUS; + ehi->action |= ATA_EH_SOFTRESET; + //ehi->flags |= ATA_EHI_NO_AUTOPSY; + //freeze = true; + } + + // Non-recovered transient data integrity error + if ( serror & SATA_DWC_SERR_ERRT ) { + dev_err(ap->dev, "Detect non-recovered transient data integrity error\n"); + ehi->err_mask |= AC_ERR_ATA_BUS; + //ehi->err_mask |= AC_ERR_DEV; + ehi->action |= ATA_EH_SOFTRESET; + //ehi->flags |= ATA_EHI_NO_AUTOPSY; + } + + // Since below errors have been recovered by hardware + // they don't need any error processing. 
+ if ( serror & SATA_DWC_SERR_ERRM ) { + dev_warn(ap->dev, "Detect recovered communication error"); + } + if ( serror & SATA_DWC_SERR_ERRI ) { + dev_warn(ap->dev, "Detect recovered data integrity error"); + } + + // If any error occurs, process the qc + if (serror & (SATA_DWC_SERR_ERRT | SATA_DWC_SERR_ERRC)) { + //if (serror & 0x03f60f0) { + abort = true; + /* find out the offending link and qc */ + if (sata_pmp_attached(ap)) { + pmp = current_pmp(ap); + // If we are working on the PMP port + if ( pmp < ap->nr_pmp_links ) { + link = &ap->pmp_link[pmp]; + ehi = &link->eh_info; + active_qc = ata_qc_from_tag(ap, link->active_tag); + err_mask |= AC_ERR_DEV; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr); + } else { + err_mask |= AC_ERR_HSM; + action |= ATA_EH_RESET; + freeze = true; + } + + } + // Work on SATA port + else { + freeze = true; + active_qc = ata_qc_from_tag(ap, link->active_tag); + } + + if ( active_qc) { + active_qc->err_mask |= err_mask; + } else { + ehi->err_mask = err_mask; + } + } + + if ( freeze | abort ) { + //sata_dwc_qc_complete(ap, active_qc, 1); + // Terminate DMA channel if it is currently in use + if ( dma_request_channel(ap) != -1 ) { + dwc_port_dbg(ap, "Terminate DMA channel %d for handling error\n", hsdev->dma_channel); + dma_dwc_terminate_dma(ap, hsdev->dma_channel); + } + } + + if (freeze) { + ret = ata_port_freeze(ap); + ata_port_printk(ap, KERN_INFO, "Freeze port with %d QCs aborted\n", ret); + } + else if (abort) { + if (active_qc) { + ret = ata_link_abort(active_qc->dev->link); + ata_link_printk(link, KERN_INFO, "Abort %d QCs\n", ret); + } else { + ret = ata_port_abort(ap); + ata_port_printk(ap, KERN_INFO, "Abort %d QCs on the SATA port\n", ret); + } + } +} + + +/* + * Function : sata_dwc_isr + * arguments : irq, void *dev_instance, struct pt_regs *regs + * Return value : irqreturn_t - status of IRQ + * This interrupt handler is called via the registered port ops function.
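+ * Rough flow (a summary of the code below): decode INTPR; ERR bits are
+ * handed to sata_dwc_error_intr(); NEWFP starts first-party DMA for
+ * the tagged NCQ command; otherwise completions are derived from the
+ * difference between the issued-tag mask and the current SActive
+ * value.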
+ * .irq_handler = sata_dwc_isr + */ +static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) +{ + struct ata_host *host = (struct ata_host *)dev_instance; + struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); + struct ata_port *ap; + struct ata_queued_cmd *qc; + unsigned long flags; + u8 status, tag; + int handled, num_processed, port = 0; + u32 intpr, sactive, sactive2, tag_mask; + struct sata_dwc_device_port *hsdevp; + + spin_lock_irqsave(&host->lock, flags); + + /* Read the interrupt register */ + intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + + dwc_port_dbg(ap,"%s\n",__func__); + if ( intpr != 0x80000080) + dwc_port_dbg(ap, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, ap->link.active_tag); + //dwc_port_dbg(ap, "%s: INTMR=0x%08x, ERRMR=0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->intmr), in_le32(&hsdev->sata_dwc_regs->errmr)); + + /* Check for error interrupt */ + if (intpr & SATA_DWC_INTPR_ERR) { + sata_dwc_error_intr(ap, hsdev, intpr); + handled = 1; +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(0 /*off blink*/, 1 /*red color*/); +#endif + goto done_irqrestore; + } + + /* Check for DMA SETUP FIS (FP DMA) interrupt */ + if (intpr & SATA_DWC_INTPR_NEWFP) { + dwc_port_dbg(ap, "%s: NEWFP INTERRUPT in HSDEV with DMA channel %d\n", __func__, hsdev->dma_channel); + clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); + + tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); + dwc_dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING) + dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); + + hsdevp->sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); + + qc = ata_qc_from_tag(ap, tag); + /* + * Start FP DMA for NCQ command. At this point the tag is the + * active tag. It is the tag that matches the command about to + * be completed. + */ + qc->ap->link.active_tag = tag; + sata_dwc_bmdma_start_by_tag(qc, tag); + qc->ap->hsm_task_state = HSM_ST_LAST; + + handled = 1; + goto done_irqrestore; + } + + sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive; + + /* If no sactive issued and tag_mask is zero then this is not NCQ */ + if (hsdevp->sata_dwc_sactive_issued == 0 && tag_mask == 0) { + if (ap->link.active_tag == ATA_TAG_POISON) + tag = 0; + else + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + + /* DEV interrupt w/ no active qc? */ + if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { + dev_err(ap->dev, "%s intr with no active qc qc=%p\n", + __func__, qc); + ap->ops->sff_check_status(ap); + handled = 1; + goto done_irqrestore; + } + + status = ap->ops->sff_check_status(ap); + + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + if (status & ATA_ERR) { + dwc_dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto done_irqrestore; + } + + dwc_dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", + __func__, prot_2_txt(qc->tf.protocol)); +drv_still_busy: + if (ata_is_dma(qc->tf.protocol)) { + int dma_flag = hsdevp->dma_pending[tag]; + /* + * Each DMA transaction produces 2 interrupts. The DMAC + * transfer complete interrupt and the SATA controller + * operation done interrupt. The command should be + * completed only after both interrupts are seen. 
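+	 *
+	 * A minimal sketch of the pairing rule used here (the counter is
+	 * bumped once per DMA-related interrupt, so an even count means
+	 * both halves of a transaction have been seen):
+	 *
+	 *	hsdevp->dma_interrupt_count++;
+	 *	if ((hsdevp->dma_interrupt_count % 2) == 0)
+	 *		sata_dwc_dma_xfer_complete(ap, 1);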
+ */ + hsdevp->dma_interrupt_count++; + if (unlikely(dma_flag == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "%s: DMA not pending " + "intpr=0x%08x status=0x%08x pend=%d\n", + __func__, intpr, status, dma_flag); + } + + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else if (ata_is_pio(qc->tf.protocol)) { + ata_sff_hsm_move(ap, qc, status, 0); + handled = 1; + goto done_irqrestore; + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto drv_still_busy; + } + + handled = 1; + goto done_irqrestore; + } + + /* + * This is a NCQ command. At this point we need to figure out for which + * tags we have gotten a completion interrupt. One interrupt may serve + * as completion for more than one operation when commands are queued + * (NCQ). We need to process each completed command. + */ + +process_cmd: /* process completed commands */ + sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive; + + if (sactive != 0 || hsdevp->sata_dwc_sactive_issued > 1 || tag_mask > 1) { + dwc_dev_dbg(ap->dev, "%s NCQ: sactive=0x%08x sactive_issued=0x%08x" + " tag_mask=0x%08x\n", __func__, sactive, + hsdevp->sata_dwc_sactive_issued, tag_mask); + } + + if (unlikely((tag_mask | hsdevp->sata_dwc_sactive_issued) != hsdevp->sata_dwc_sactive_issued)) { + dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " + "sata_dwc_sactive_issued=0x%08x tag_mask=0x%08x\n", + sactive, hsdevp->sata_dwc_sactive_issued, tag_mask); + } + + /* read just to clear ... not bad if currently still busy */ + status = ap->ops->sff_check_status(ap); + dwc_dev_dbg(ap->dev, "%s ATA status register=0x%x, tag_mask=0x%x\n", __func__, status, tag_mask); + + tag = 0; + num_processed = 0; + while (tag_mask) { + num_processed++; + while (!(tag_mask & 0x00000001)) { + tag++; + tag_mask <<= 1; + } + tag_mask &= (~0x00000001); + qc = ata_qc_from_tag(ap, tag); + + /* To be picked up by completion functions */ + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + /* Let libata/scsi layers handle error */ + if (unlikely(status & ATA_ERR)) { + dwc_dev_vdbg(ap->dev, "%s ATA_ERR (0x%x)\n", + __func__, status); + + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto done_irqrestore; + } + + /* Process completed command */ + dwc_dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, + prot_2_txt(qc->tf.protocol)); + if (ata_is_dma(qc->tf.protocol)) { + hsdevp->dma_interrupt_count++; + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) + dev_warn(ap->dev, + "%s: DMA not pending?\n", __func__); + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto still_busy; + } + continue; + +still_busy: + ap->stats.idle_irq++; + dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", + ap->print_id); + } /* while tag_mask */ + + /* + * Check to see if any commands completed while we were processing our + * initial set of completed commands (reading of status clears + * interrupts, so we might miss a completed command interrupt if one + * came in while we were processing: + * we read status as part of processing a completed command). 
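+	 *
+	 * Concretely: sactive2 is re-read below and compared with the
+	 * sactive snapshot taken at the top of this pass; any difference
+	 * means new completions arrived mid-scan, so we jump back to
+	 * process_cmd instead of leaving the ISR.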
+ */ + sactive2 = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + if (sactive2 != sactive) { + dwc_dev_dbg(ap->dev, "More finished - sactive=0x%x sactive2=0x%x\n", + sactive, sactive2); + goto process_cmd; + } + handled = 1; + +done_irqrestore: + spin_unlock_irqrestore(&host->lock, flags); +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(0 /*off blink*/, -1 /* no color */); +#endif + return IRQ_RETVAL(handled); +} + + +/* + * Clear DMA Control Register after completing transferring data + * using AHB DMA. + */ +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); + + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { + // Clear receive channel enable bit + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_RX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { + // Clear transmit channel enable bit + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else { + /* + * This should not happen, it indicates the driver is out of + * sync. If it does happen, clear dmacr anyway. + */ + dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending " + "tag=0x%02x pending=%d dmacr: 0x%08x\n", + __func__, tag, hsdevp->dma_pending[tag], + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + // Clear all transmit and receive bit, but TXMOD bit is set to 1 + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TXRXCH_CLEAR); + } +} + +/* + * + */ +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) +{ + struct ata_queued_cmd *qc; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u8 tag = 0; + + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + +#ifdef DEBUG_NCQ + if (tag > 0) { + dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " + "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, + dir_2_txt(qc->dma_dir), prot_2_txt(qc->tf.protocol), + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } +#endif + + if (ata_is_dma(qc->tf.protocol)) { + // DMA out of sync error + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " + "pending dmacr: 0x%08x\n", __func__, + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } + + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + sata_dwc_qc_complete(ap, qc, check_status); + ap->link.active_tag = ATA_TAG_POISON; + } else { + sata_dwc_qc_complete(ap, qc, check_status); + } +} + +/* + * + */ +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status) +{ + u8 status = 0; + int i = 0; + u32 mask = 0x0; + u8 tag = qc->tag; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u32 serror; + int dma_ch; + + dwc_dev_vdbg(ap->dev, "%s checkstatus? 
%x\n", __func__, check_status); + + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)) + dev_err(ap->dev, "TX DMA PENDINGING\n"); + else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)) + dev_err(ap->dev, "RX DMA PENDINGING\n"); + + if (check_status) { + i = 0; + do { + /* check main status, clearing INTRQ */ + status = ap->ops->sff_check_status(ap); + if (status & ATA_BUSY) { + dwc_dev_vdbg(ap->dev, "STATUS BUSY (0x%02x) [%d]\n", + status, i); + } + if (++i > 10) + break; + } while (status & ATA_BUSY); + + status = ap->ops->sff_check_status(ap); + if (unlikely(status & ATA_BUSY)) + dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY " + "(0x%02x) [%d]\n", qc->tf.command, status, i); + + + // Check error ==> need to process error here + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + if (unlikely(serror & SATA_DWC_SERR_ERR_BITS)) + { + dev_err(ap->dev, "****** SERROR=0x%08x ******\n", serror); + ap->link.eh_context.i.action |= ATA_EH_RESET; + if (ata_is_dma(qc->tf.protocol)) { + dma_ch = hsdevp->dma_chan[tag]; + dma_dwc_terminate_dma(ap, dma_ch); + } else { + dma_ch = hsdevp->dma_chan[0]; + dma_dwc_terminate_dma(ap, dma_ch); + } + } + } + dwc_dev_vdbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u: " + "protocol=%d\n", qc->tf.command, status, ap->print_id, + qc->tf.protocol); + + /* clear active bit */ + mask = (~(qcmd_tag_to_mask(tag))); + hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask; + hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask; + dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued); + dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + /* Complete taskfile transaction (does not read SCR registers) */ + ata_qc_complete(qc); + + return 0; +} + +/* + * Clear interrupt and error flags in DMA status register. + */ +void sata_dwc_irq_clear (struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + dwc_port_dbg(ap,"%s\n",__func__); + + // Clear DMA interrupts + clear_chan_interrupts(hsdev->dma_channel); + //sata_dma_regs + //out_le32(&hsdev->sata_dwc_regs->intmr, + // in_le32(&hsdev->sata_dwc_regs->intmr) & ~SATA_DWC_INTMR_ERRM); + //out_le32(&hsdev->sata_dwc_regs->errmr, 0x0); + //sata_dwc_check_status(ap); +} + +/* + * Turn on IRQ + */ +void sata_dwc_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u8 tmp; + + dwc_port_dbg(ap,"%s\n",__func__); + ap->ctl &= ~ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ioaddr->ctl_addr) + iowrite8(ap->ctl, ioaddr->ctl_addr); + tmp = ata_wait_idle(ap); + + ap->ops->sff_irq_clear(ap); + enable_err_irq(hsdev); +} + + +/* + * This function enables the interrupts in IMR and unmasks them in ERRMR + * + */ +static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) +{ + // Enable interrupts + out_le32(&hsdev->sata_dwc_regs->intmr, + SATA_DWC_INTMR_ERRM | + SATA_DWC_INTMR_NEWFPM | + SATA_DWC_INTMR_PMABRTM | + SATA_DWC_INTMR_DMATM); + + /* + * Unmask the error bits that should trigger an error interrupt by + * setting the error mask register. 
+ */ + out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS); + + dwc_dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__, + in_le32(&hsdev->sata_dwc_regs->intmr), + in_le32(&hsdev->sata_dwc_regs->errmr)); +} + +/* + * Configure DMA and interrupts on SATA port. This should be called after + * hardreset is executed on the SATA port. + */ +static void sata_dwc_init_port ( struct ata_port *ap ) { + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + // Configure DMA + if (ap->port_no == 0) { + dwc_dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", + __func__); + + // Clear all transmit/receive bits + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + dwc_dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__); + out_le32(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + } + + // Enable interrupts + sata_dwc_enable_interrupts(hsdev); +} + + +/* + * Setup SATA ioport with corresponding register addresses + */ +static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = (void *)base + 0x00; + port->data_addr = (void *)base + 0x00; + + port->error_addr = (void *)base + 0x04; + port->feature_addr = (void *)base + 0x04; + + port->nsect_addr = (void *)base + 0x08; + + port->lbal_addr = (void *)base + 0x0c; + port->lbam_addr = (void *)base + 0x10; + port->lbah_addr = (void *)base + 0x14; + + port->device_addr = (void *)base + 0x18; + port->command_addr = (void *)base + 0x1c; + port->status_addr = (void *)base + 0x1c; + + port->altstatus_addr = (void *)base + 0x20; + port->ctl_addr = (void *)base + 0x20; +} + + +/* + * Function : sata_dwc_port_start + * arguments : struct ata_ioports *port + * Return value : returns 0 if success, error code otherwise + * This function allocates the scatter gather LLI table for AHB DMA + */ +static int sata_dwc_port_start(struct ata_port *ap) +{ + int err = 0; + struct sata_dwc_device *hsdev; + struct sata_dwc_device_port *hsdevp = NULL; + struct device *pdev; + u32 sstatus; + int i; + + hsdev = HSDEV_FROM_AP(ap); + + dwc_dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); + + hsdev->host = ap->host; + pdev = ap->host->dev; + if (!pdev) { + dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); + err = -ENODEV; + goto cleanup_exit; + } + + /* Allocate Port Struct */ + hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); + if (!hsdevp) { + dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__); + err = -ENOMEM; + goto cleanup_exit; + } + hsdevp->hsdev = hsdev; + + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) + hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; + + ap->bmdma_prd = 0; /* set these so libata doesn't use them */ + ap->bmdma_prd_dma = 0; + + /* + * DMA - Assign scatter gather LLI table. We can't use the libata + * version since it's PRD is IDE PCI specific. 
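+	 *
+	 * One LLI table is allocated per queued-command slot
+	 * (SATA_DWC_QCMD_MAX of them), dma-coherent so the AHB DMA engine
+	 * and the CPU agree on its contents without explicit cache flushes.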
+ */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + hsdevp->llit[i] = dma_alloc_coherent(pdev, + SATA_DWC_DMAC_LLI_TBL_SZ, + &(hsdevp->llit_dma[i]), + GFP_ATOMIC); + if (!hsdevp->llit[i]) { + dev_err(ap->dev, "%s: dma_alloc_coherent failed size " + "0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ); + err = -ENOMEM; + goto cleanup_exit; + } + } + + if (ap->port_no == 0) { + dwc_dev_vdbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", + __func__); + + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + dwc_dev_vdbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__); + out_le32(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + ata_port_printk(ap, KERN_INFO, "%s: setting burst size in DBTSR: 0x%08x\n", + __func__, in_le32(&hsdev->sata_dwc_regs->dbtsr)); + } + + /* Clear any error bits before libata starts issuing commands */ + clear_serror(ap); + + ap->private_data = hsdevp; + + /* Are we in Gen I or II */ + sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS); + switch (SATA_DWC_SCR0_SPD_GET(sstatus)) { + case 0x0: + dev_info(ap->dev, "**** No neg speed (nothing attached?) \n"); + break; + case 0x1: + dev_info(ap->dev, "**** GEN I speed rate negotiated \n"); + break; + case 0x2: + dev_info(ap->dev, "**** GEN II speed rate negotiated \n"); + break; + } + +cleanup_exit: + if (err) { + kfree(hsdevp); + sata_dwc_port_stop(ap); + dwc_dev_vdbg(ap->dev, "%s: fail\n", __func__); + } else { + dwc_dev_vdbg(ap->dev, "%s: done\n", __func__); + } + + return err; +} + + +static void sata_dwc_port_stop(struct ata_port *ap) +{ + int i; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dwc_port_dbg(ap, "%s: stop port\n", __func__); + + if (hsdevp && hsdev) { + /* deallocate LLI table */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + dma_free_coherent(ap->host->dev, + SATA_DWC_DMAC_LLI_TBL_SZ, + hsdevp->llit[i], hsdevp->llit_dma[i]); + } + + kfree(hsdevp); + } + ap->private_data = NULL; +} + +/* + * Since the SATA DWC is master only. The dev select operation will + * be removed. + */ +void sata_dwc_dev_select(struct ata_port *ap, unsigned int device) +{ + // Do nothing + ndelay(100); +} + +/* + * Function : sata_dwc_exec_command_by_tag + * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued + * Return value : None + * This function keeps track of individual command tag ids and calls + * ata_exec_command in libata + */ +static void sata_dwc_exec_command_by_tag(struct ata_port *ap, + struct ata_taskfile *tf, + u8 tag, u32 cmd_issued) +{ + unsigned long flags; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dwc_dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d, ap->link->tag=0x%08x\n", __func__, tf->command, + ata_cmd_2_txt(tf), tag, ap->link.active_tag); + + spin_lock_irqsave(&ap->host->lock, flags); + hsdevp->cmd_issued[tag] = cmd_issued; + spin_unlock_irqrestore(&ap->host->lock, flags); + + /* + * Clear SError before executing a new command. + * + * TODO if we read a PM's registers now, we will throw away the task + * file values loaded into the shadow registers for this command. + * + * sata_dwc_scr_write and read can not be used here. Clearing the PM + * managed SError register for the disk needs to be done before the + * task file is loaded. 
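+	 *
+	 * Hence the fixed ordering below: clear_serror(ap) runs first, and
+	 * only then does ata_sff_exec_command() write the command register;
+	 * reversing the two could clobber the shadow registers already
+	 * loaded for this command.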
+ */ + clear_serror(ap); + ata_sff_exec_command(ap, tf); +} + +static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, + SATA_DWC_CMD_ISSUED_PENDING); +} + +static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + dwc_port_dbg(qc->ap, "%s\n", __func__); + if (ata_is_ncq(qc->tf.protocol)) { + dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + + sata_dwc_bmdma_setup_by_tag(qc, tag); +} + +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + volatile int start_dma; + u32 reg, dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); + struct ata_port *ap = qc->ap; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + int dir = qc->dma_dir; + dma_chan = hsdevp->dma_chan[tag]; + + /* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */ + + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { + start_dma = 1; + if (dir == DMA_TO_DEVICE) + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; + else + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; + } else { + dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " + "(tag=%d) - DMA NOT started\n", __func__, + hsdevp->cmd_issued[tag], tag); + start_dma = 0; + } + + dwc_dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " + "start_dma? %x\n", __func__, qc, tag, qc->tf.command, + dir_2_txt(qc->dma_dir), start_dma); + sata_dwc_tf_dump(hsdev->dev, &(qc->tf)); + + // Start DMA transfer + if (start_dma) { + reg = sata_dwc_core_scr_read(ap, SCR_ERROR); + if (unlikely(reg & SATA_DWC_SERR_ERR_BITS)) { + dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", + __func__, reg); + //sata_async_notification(ap); + //return; + } + + // Set DMA control registers + if (dir == DMA_TO_DEVICE) + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXCHEN); + else + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_RXCHEN); + + dwc_dev_vdbg(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr)); + /* Enable AHB DMA transfer on the specified channel */ + dma_dwc_xfer_start(dma_chan); + } +} + + +static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + if (ata_is_ncq(qc->tf.protocol)) { + dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + + dwc_port_dbg(qc->ap, "%s, tag=0x%08x\n", __func__, tag); + sata_dwc_bmdma_start_by_tag(qc, tag); +} + +/* + * Function : sata_dwc_qc_prep_by_tag + * arguments : ata_queued_cmd *qc, u8 tag + * Return value : None + * qc_prep for a particular queued command based on tag + */ +static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + struct ata_port *ap = qc->ap; + int dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + int dir; + + // DMA direction + dir = qc->dma_dir; + + if ((dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) + return; + + dwc_dev_vdbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", + __func__, ap->port_no, dir_2_txt(dir), qc->n_elem); + + // Setup DMA for transfer + dma_chan = dma_dwc_xfer_setup(qc, hsdevp->llit[tag], + hsdevp->llit_dma[tag], + (void *__iomem)(&hsdev->sata_dwc_regs->dmadr)); + + if (unlikely(dma_chan < 0)) { + dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", + __func__, dma_chan); + return; + 
} + + hsdevp->dma_chan[tag] = dma_chan; +} + + + +/** + * ata_sff_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA command, with proper synchronization with interrupt + * handler / other threads. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void sata_dwc_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) +{ + iowrite8(tf->command, ap->ioaddr.command_addr); + /* If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + */ + if (ap->ioaddr.altstatus_addr) + ioread8(ap->ioaddr.altstatus_addr); + ndelay(400); +} + +/** + * sata_dwc_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static inline void sata_dwc_tf_to_host(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + dwc_port_dbg(ap,"%s\n",__func__); + ap->ops->sff_tf_load(ap, tf); + sata_dwc_exec_command(ap, tf); +} + + +/* + * Process command queue issue + */ +static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + int ret = 0; + struct ata_eh_info *ehi; + u32 scontrol, sstatus; + scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL); + + ehi = &ap->link.eh_info; + /* + * Fix the problem when PMP card is unplugged from the SATA port. + * QC is still issued but no device present. Ignore the current QC. + * and pass error to error handler + */ + sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS); + if ( sstatus == 0x0) { + ata_port_printk(ap, KERN_INFO, "Detect connection lost while commands are executing --> ignore current command\n"); + ata_ehi_hotplugged(ehi); + ap->link.eh_context.i.action |= ATA_EH_RESET; + return ret; + } + + // Set PMP field in the SCONTROL register + if ( sata_pmp_attached(ap) ) + sata_dwc_pmp_select(ap, qc->dev->link->pmp); + +#ifdef DEBUG_NCQ + if (qc->tag > 0 || ap->link.sactive > 1) { + dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s" + " ap active_tag=0x%08x ap sactive=0x%08x\n", + __func__, ap->print_id, qc->tf.command, + ata_cmd_2_txt(&qc->tf), qc->tag, + prot_2_txt(qc->tf.protocol), ap->link.active_tag, + ap->link.sactive); + } +#endif + + // Process NCQ + if (ata_is_ncq(qc->tf.protocol)) { + dwc_link_dbg(qc->dev->link, "%s --> process NCQ , ap->link.active_tag=0x%08x, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag); + ap->link.active_tag = qc->tag; + ap->ops->sff_tf_load(ap, &qc->tf); + sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, + SATA_DWC_CMD_ISSUED_PENDING); + } else { + dwc_link_dbg(qc->dev->link, "%s --> non NCQ process, ap->link.active_tag=%d, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag); + // Sync ata_port with qc->tag + ap->link.active_tag = qc->tag; + ret = ata_bmdma_qc_issue(qc); + } + + return ret; +} + +#if 0 +/* + * Function : sata_dwc_eng_timeout + * arguments : ata_port *ap + * Return value : None + * error handler for DMA time out + * ata_eng_timeout(ap) -- this does bmdma stuff which can not be done by this + * driver. 
SEE ALSO ata_qc_timeout(ap) + */ +static void sata_dwc_eng_timeout(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct ata_queued_cmd *qc; + u8 tag; + uint mask = 0x0; + unsigned long flags; + u32 serror, intpr, dma_ch; + + tag = ap->link.active_tag; + dma_ch = hsdevp->dma_chan[tag]; + qc = ata_qc_from_tag(ap, tag); + + dev_err(ap->dev, "%s: id=%d active_tag=%d qc=%p dma_chan=%d\n", + __func__, ap->print_id, tag, qc, dma_ch); + + intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + + dev_err(ap->dev, "intpr=0x%08x serror=0x%08x\n", intpr, serror); + + /* If there are no error bits set, can we just pass this on to eh? */ + if (!(serror & SATA_DWC_SERR_ERR_BITS) && + !(intpr & SATA_DWC_INTPR_ERR)) { + + spin_lock_irqsave(&ap->host->lock, flags); + if (dma_dwc_channel_enabled(dma_ch)) + dma_dwc_terminate_dma(ap, dma_ch); + + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + + /* clear active bit */ + mask = (~(qcmd_tag_to_mask(tag))); + hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask; + hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask; + + spin_unlock_irqrestore(&ap->host->lock, flags); + } else { + /* This is wrong, what really needs to be done is a reset. */ + + spin_lock_irqsave(ap->lock, flags); + + if (ata_is_dma(qc->tf.protocol)) { + /* disable DMAC */ + dma_dwc_terminate_dma(ap, dma_ch); + } + + spin_unlock_irqrestore(ap->lock, flags); + } + WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + if (qc->flags & ATA_QCFLAG_ACTIVE) { + qc->err_mask |= AC_ERR_TIMEOUT; + /* + * test-only: The original code (AMCC: 2.6.19) called + * ata_eng_timeout(ap) here. This function is not available + * anymore. So what to do now? 
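+		 * (In current libata the timeout path is expected to run
+		 * through the ->error_handler() EH machinery instead, which
+		 * is why this block is kept under #if 0 only as a reference.)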
+		 */
+	}
+}
+#endif
+
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ * qc_prep for a particular queued command
+ */
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+	u32 sactive;
+	u8 tag = qc->tag;
+
+	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0) {
+		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+			 __func__, qc->tag, qc->ap->link.active_tag);
+	}
+#endif
+
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE);
+		sactive |= (0x00000001 << tag);
+		sata_dwc_core_scr_write(qc->ap, SCR_ACTIVE, sactive);
+		dwc_dev_vdbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
+			     "sactive=0x%08x\n", __func__, tag,
+			     qc->ap->link.sactive, sactive);
+	} else {
+		tag = 0;
+	}
+
+	sata_dwc_qc_prep_by_tag(qc, tag);
+}
+
+
+static void sata_dwc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		ata_eh_freeze_port(qc->ap);
+}
+
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+	u32 serror;
+	u32 intmr, errmr;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+	intmr = in_le32(&hsdev->sata_dwc_regs->intmr);
+	errmr = in_le32(&hsdev->sata_dwc_regs->errmr);
+
+	//sata_dwc_dma_xfer_complete(ap, 1);
+	dwc_port_dbg(ap, "%s: SERROR=0x%08x, INTMR=0x%08x, ERRMR=0x%08x\n",
+		     __func__, serror, intmr, errmr);
+
+	dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",
+		      __func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued);
+	dwc_port_vdbg(ap, "dmacr=0x%08x\n", in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+	dwc_port_vdbg(ap, "qc_active=0x%08x, qc_allocated=0x%08x, active_tag=%d\n",
+		      ap->qc_active, ap->qc_allocated, ap->link.active_tag);
+
+	sata_pmp_error_handler(ap);
+}
+
+/*
+ * sata_dwc_check_status - Get value of the Status Register
+ * @ap: Port to check
+ *
+ * Output content of the status register (CDR7)
+ */
+u8 sata_dwc_check_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.status_addr);
+}
+
+
+/*
+ * Freeze the port by clearing interrupts
+ * @ap: Port to freeze
+ */
+void sata_dwc_freeze(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "call %s ...\n", __func__);
+	// turn IRQ off
+	clear_intpr(hsdev);
+	clear_serror(ap);
+	out_le32(&hsdev->sata_dwc_regs->intmr, 0x0);
+}
+
+/*
+ * Thaw the port by turning IRQ back on
+ */
+void sata_dwc_thaw(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "call %s ...\n", __func__);
+	// Clear IRQ
+	clear_intpr(hsdev);
+	// Turn IRQ back on
+	sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	/*
+	 * test-only: Currently this driver doesn't handle NCQ
+	 * correctly. We enable NCQ but set the queue depth to a
+	 * max of 1. This will get fixed in a future release.
+	 */
+//	.sg_tablesize		= LIBATA_MAX_PRD,
+	.can_queue		= ATA_DEF_QUEUE,	/* ATA_MAX_QUEUE */
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+};
+
+
+static struct ata_port_operations sata_dwc_ops = {
+	.inherits		= &sata_pmp_port_ops,
+	.dev_config		= sata_dwc_dev_config,
+
+	.error_handler		= sata_dwc_error_handler,
+	.softreset		= sata_dwc_softreset,
+	.hardreset		= sata_dwc_hardreset,
+	.pmp_softreset		= sata_dwc_softreset,
+	.pmp_hardreset		= sata_dwc_pmp_hardreset,
+
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
+	.qc_prep		= sata_dwc_qc_prep,
+	.qc_issue		= sata_dwc_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+
+	.scr_read		= sata_dwc_scr_read,
+	.scr_write		= sata_dwc_scr_write,
+
+	.port_start		= sata_dwc_port_start,
+	.port_stop		= sata_dwc_port_stop,
+
+	.bmdma_setup		= sata_dwc_bmdma_setup,
+	.bmdma_start		= sata_dwc_bmdma_start,
+	// Reuse some SFF functions
+	.sff_check_status	= sata_dwc_check_status,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_data_xfer		= ata_sff_data_xfer,
+	.sff_tf_load		= ata_sff_tf_load,
+	.sff_dev_select		= sata_dwc_dev_select,
+	.sff_exec_command	= sata_dwc_exec_command,
+
+	.sff_irq_on		= sata_dwc_irq_on,
+/*	.sff_irq_clear		= sata_dwc_irq_clear,
+	.freeze			= sata_dwc_freeze,
+	.thaw			= sata_dwc_thaw,
+	.sff_irq_on		= ata_sff_irq_on,
+*/
+	.sff_irq_clear		= ata_bmdma_irq_clear,
+	.freeze			= ata_sff_freeze,
+	.thaw			= ata_sff_thaw,
+	.pmp_attach		= sata_dwc_pmp_attach,
+	.pmp_detach		= sata_dwc_pmp_detach,
+	.post_internal_cmd	= sata_dwc_post_internal_cmd,
+
+	/* test-only: really needed? */
+	//.eng_timeout		= sata_dwc_eng_timeout,
+};
+
+static const struct ata_port_info sata_dwc_port_info[] = {
+	{
+		/*
+		 * test-only: NCQ handling in this driver is still being
+		 * stabilized. ATA_FLAG_NCQ is set in the flags below; drop
+		 * it there to fall back to non-NCQ operation.
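+		 * ATA_FLAG_PMP below advertises port-multiplier support and
+		 * ATA_FLAG_AN asynchronous notification, which PMP hotplug
+		 * detection relies on.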
+		 */
+		.flags = ATA_FLAG_SATA |
+				ATA_FLAG_NCQ |
+				ATA_FLAG_PMP | ATA_FLAG_AN,
+		.pio_mask = ATA_PIO4,	/* pio 0-4 */
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &sata_dwc_ops,
+	},
+};
+
+static int sata_dwc_probe(struct platform_device *ofdev)
+{
+	struct sata_dwc_device *hsdev;
+	u32 idr, versionr;
+	char *ver = (char *)&versionr;
+	u8 *base = NULL;
+	int err = 0;
+	int irq;
+	struct ata_host *host;
+	struct ata_port_info pi = sata_dwc_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device_node *np = ofdev->dev.of_node;
+	const unsigned int *dma_channel;
+
+	/*
+	 * Check if device is enabled
+	 */
+	if (!of_device_is_available(np)) {
+		printk(KERN_INFO "%s: Port disabled via device-tree\n",
+		       np->full_name);
+		return 0;
+	}
+
+	/* Allocate DWC SATA device */
+	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+	if (hsdev == NULL) {
+		dev_err(&ofdev->dev, "kzalloc failed for hsdev\n");
+		err = -ENOMEM;
+		goto error;
+	}
+
+	// Identify the SATA DMA channel used for the current SATA device
+	dma_channel = of_get_property(np, "dma-channel", NULL);
+	if (dma_channel) {
+		dev_notice(&ofdev->dev, "Getting DMA channel %d\n", *dma_channel);
+		hsdev->dma_channel = *dma_channel;
+	} else {
+		hsdev->dma_channel = 0;
+	}
+
+	/* Ioremap SATA registers */
+	base = of_iomap(np, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "ioremap failed for SATA register address\n");
+		err = -ENODEV;
+		goto error_kmalloc;
+	}
+	hsdev->reg_base = base;
+	dwc_dev_vdbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+	/* Synopsys DWC SATA specific Registers */
+	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+
+	/* Allocate and fill host */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+	if (!host) {
+		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+		err = -ENOMEM;
+		goto error_iomap;
+	}
+
+	host->private_data = hsdev;
+
+	/* Setup port */
+	host->ports[0]->ioaddr.cmd_addr = base;
+	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+	hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET);
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+	/* Read the ID and Version Registers */
+	idr = in_le32(&hsdev->sata_dwc_regs->idr);
+	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+		   idr, ver[0], ver[1], ver[2]);
+
+	/* Get SATA DMA interrupt number */
+	irq = irq_of_parse_and_map(np, 1);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/* Get physical SATA DMA register base address */
+	if (!sata_dma_regs) {
+		sata_dma_regs = of_iomap(np, 1);
+		if (!sata_dma_regs) {
+			dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n");
+			err = -ENODEV;
+			goto error_out;
+		}
+	}
+
+	/* Save dev for later use in dev_xxx() routines */
+	hsdev->dev = &ofdev->dev;
+
+	/* Init global dev list */
+	dwc_dev_list[hsdev->dma_channel] = hsdev;
+
+	/* Initialize AHB DMAC */
+	hsdev->irq_dma = irq;
+	dma_dwc_init(hsdev);
+	dma_register_interrupt(hsdev);
+
+	/* Enable SATA Interrupts */
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Get SATA interrupt number */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/*
+	 * Now, register with the libATA core; this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
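+	/*
+	 * Note: ata_host_activate() returns an error code; a stricter probe
+	 * would check it, e.g. (a sketch, not the original code):
+	 *
+	 *	err = ata_host_activate(host, irq, sata_dwc_isr, 0,
+	 *				&sata_dwc_sht);
+	 *	if (err)
+	 *		goto error_out;
+	 */
+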
ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); + + dev_set_drvdata(&ofdev->dev, host); + + /* Everything is fine */ + return 0; + +error_out: + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + +error_iomap: + iounmap(base); +error_kmalloc: + kfree(hsdev); +error: + return err; +} + +static int sata_dwc_remove(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct ata_host *host = dev_get_drvdata(dev); + struct sata_dwc_device *hsdev = host->private_data; + + ata_host_detach(host); + + dev_set_drvdata(dev, NULL); + + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + + iounmap(hsdev->reg_base); + kfree(hsdev); + kfree(host); + + dwc_dev_vdbg(&ofdev->dev, "done\n"); + + return 0; +} + +static const struct of_device_id sata_dwc_match[] = { + { .compatible = "amcc,sata-460ex", }, + { .compatible = "amcc,sata-apm82181", }, + {} +}; +MODULE_DEVICE_TABLE(of, sata_dwc_match); + +static struct platform_driver sata_dwc_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sata_dwc_match, + }, + .probe = sata_dwc_probe, + .remove = sata_dwc_remove, +}; + +module_platform_driver(sata_dwc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); +MODULE_DESCRIPTION("DesignWare Cores SATA controller driver"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c new file mode 100644 index 00000000000..616a6d2ac20 --- /dev/null +++ b/drivers/ata/sata_fsl.c @@ -0,0 +1,1644 @@ +/* + * drivers/ata/sata_fsl.c + * + * Freescale 3.0Gbps SATA device driver + * + * Author: Ashish Kalra <ashish.kalra@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Copyright (c) 2006-2007, 2011-2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <linux/libata.h> +#include <asm/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +static unsigned int intr_coalescing_count; +module_param(intr_coalescing_count, int, S_IRUGO); +MODULE_PARM_DESC(intr_coalescing_count, + "INT coalescing count threshold (1..31)"); + +static unsigned int intr_coalescing_ticks; +module_param(intr_coalescing_ticks, int, S_IRUGO); +MODULE_PARM_DESC(intr_coalescing_ticks, + "INT coalescing timer threshold in AHB ticks"); +/* Controller information */ +enum { + SATA_FSL_QUEUE_DEPTH = 16, + SATA_FSL_MAX_PRD = 63, + SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1, + SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */ + + SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | + ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN), + + SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, + SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ + SATA_FSL_CMD_SLOT_SIZE = (SATA_FSL_MAX_CMDS * SATA_FSL_CMD_HDR_SIZE), + + /* + * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and + * chained indirect PRDEs up to a max count of 63. + * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will + * be setup as an indirect descriptor, pointing to it's next + * (contiguous) PRDE. 
Though chained indirect PRDE arrays are + * supported,it will be more efficient to use a direct PRDT and + * a single chain/link to indirect PRDE array/PRDT. + */ + + SATA_FSL_CMD_DESC_CFIS_SZ = 32, + SATA_FSL_CMD_DESC_SFIS_SZ = 32, + SATA_FSL_CMD_DESC_ACMD_SZ = 16, + SATA_FSL_CMD_DESC_RSRVD = 16, + + SATA_FSL_CMD_DESC_SIZE = (SATA_FSL_CMD_DESC_CFIS_SZ + + SATA_FSL_CMD_DESC_SFIS_SZ + + SATA_FSL_CMD_DESC_ACMD_SZ + + SATA_FSL_CMD_DESC_RSRVD + + SATA_FSL_MAX_PRD * 16), + + SATA_FSL_CMD_DESC_OFFSET_TO_PRDT = + (SATA_FSL_CMD_DESC_CFIS_SZ + + SATA_FSL_CMD_DESC_SFIS_SZ + + SATA_FSL_CMD_DESC_ACMD_SZ + + SATA_FSL_CMD_DESC_RSRVD), + + SATA_FSL_CMD_DESC_AR_SZ = (SATA_FSL_CMD_DESC_SIZE * SATA_FSL_MAX_CMDS), + SATA_FSL_PORT_PRIV_DMA_SZ = (SATA_FSL_CMD_SLOT_SIZE + + SATA_FSL_CMD_DESC_AR_SZ), + + /* + * MPC8315 has two SATA controllers, SATA1 & SATA2 + * (one port per controller) + * MPC837x has 2/4 controllers, one port per controller + */ + + SATA_FSL_MAX_PORTS = 1, + + SATA_FSL_IRQ_FLAG = IRQF_SHARED, +}; + +/* + * Interrupt Coalescing Control Register bitdefs */ +enum { + ICC_MIN_INT_COUNT_THRESHOLD = 1, + ICC_MAX_INT_COUNT_THRESHOLD = ((1 << 5) - 1), + ICC_MIN_INT_TICKS_THRESHOLD = 0, + ICC_MAX_INT_TICKS_THRESHOLD = ((1 << 19) - 1), + ICC_SAFE_INT_TICKS = 1, +}; + +/* +* Host Controller command register set - per port +*/ +enum { + CQ = 0, + CA = 8, + CC = 0x10, + CE = 0x18, + DE = 0x20, + CHBA = 0x24, + HSTATUS = 0x28, + HCONTROL = 0x2C, + CQPMP = 0x30, + SIGNATURE = 0x34, + ICC = 0x38, + + /* + * Host Status Register (HStatus) bitdefs + */ + ONLINE = (1 << 31), + GOING_OFFLINE = (1 << 30), + BIST_ERR = (1 << 29), + CLEAR_ERROR = (1 << 27), + + FATAL_ERR_HC_MASTER_ERR = (1 << 18), + FATAL_ERR_PARITY_ERR_TX = (1 << 17), + FATAL_ERR_PARITY_ERR_RX = (1 << 16), + FATAL_ERR_DATA_UNDERRUN = (1 << 13), + FATAL_ERR_DATA_OVERRUN = (1 << 12), + FATAL_ERR_CRC_ERR_TX = (1 << 11), + FATAL_ERR_CRC_ERR_RX = (1 << 10), + FATAL_ERR_FIFO_OVRFL_TX = (1 << 9), + FATAL_ERR_FIFO_OVRFL_RX = (1 << 8), + + FATAL_ERROR_DECODE = FATAL_ERR_HC_MASTER_ERR | + FATAL_ERR_PARITY_ERR_TX | + FATAL_ERR_PARITY_ERR_RX | + FATAL_ERR_DATA_UNDERRUN | + FATAL_ERR_DATA_OVERRUN | + FATAL_ERR_CRC_ERR_TX | + FATAL_ERR_CRC_ERR_RX | + FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX, + + INT_ON_DATA_LENGTH_MISMATCH = (1 << 12), + INT_ON_FATAL_ERR = (1 << 5), + INT_ON_PHYRDY_CHG = (1 << 4), + + INT_ON_SIGNATURE_UPDATE = (1 << 3), + INT_ON_SNOTIFY_UPDATE = (1 << 2), + INT_ON_SINGL_DEVICE_ERR = (1 << 1), + INT_ON_CMD_COMPLETE = 1, + + INT_ON_ERROR = INT_ON_FATAL_ERR | INT_ON_SNOTIFY_UPDATE | + INT_ON_PHYRDY_CHG | INT_ON_SINGL_DEVICE_ERR, + + /* + * Host Control Register (HControl) bitdefs + */ + HCONTROL_ONLINE_PHY_RST = (1 << 31), + HCONTROL_FORCE_OFFLINE = (1 << 30), + HCONTROL_LEGACY = (1 << 28), + HCONTROL_PARITY_PROT_MOD = (1 << 14), + HCONTROL_DPATH_PARITY = (1 << 12), + HCONTROL_SNOOP_ENABLE = (1 << 10), + HCONTROL_PMP_ATTACHED = (1 << 9), + HCONTROL_COPYOUT_STATFIS = (1 << 8), + IE_ON_FATAL_ERR = (1 << 5), + IE_ON_PHYRDY_CHG = (1 << 4), + IE_ON_SIGNATURE_UPDATE = (1 << 3), + IE_ON_SNOTIFY_UPDATE = (1 << 2), + IE_ON_SINGL_DEVICE_ERR = (1 << 1), + IE_ON_CMD_COMPLETE = 1, + + DEFAULT_PORT_IRQ_ENABLE_MASK = IE_ON_FATAL_ERR | IE_ON_PHYRDY_CHG | + IE_ON_SIGNATURE_UPDATE | IE_ON_SNOTIFY_UPDATE | + IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE, + + EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31), + DATA_SNOOP_ENABLE_V1 = (1 << 22), + DATA_SNOOP_ENABLE_V2 = (1 << 28), +}; + +/* + * SATA Superset Registers + */ +enum { + SSTATUS = 0, + SERROR = 4, + SCONTROL = 
8, + SNOTIFY = 0xC, +}; + +/* + * Control Status Register Set + */ +enum { + TRANSCFG = 0, + TRANSSTATUS = 4, + LINKCFG = 8, + LINKCFG1 = 0xC, + LINKCFG2 = 0x10, + LINKSTATUS = 0x14, + LINKSTATUS1 = 0x18, + PHYCTRLCFG = 0x1C, + COMMANDSTAT = 0x20, +}; + +/* TRANSCFG (transport-layer) configuration control */ +enum { + TRANSCFG_RX_WATER_MARK = (1 << 4), +}; + +/* PHY (link-layer) configuration control */ +enum { + PHY_BIST_ENABLE = 0x01, +}; + +/* + * Command Header Table entry, i.e, command slot + * 4 Dwords per command slot, command header size == 64 Dwords. + */ +struct cmdhdr_tbl_entry { + u32 cda; + u32 prde_fis_len; + u32 ttl; + u32 desc_info; +}; + +/* + * Description information bitdefs + */ +enum { + CMD_DESC_RES = (1 << 11), + VENDOR_SPECIFIC_BIST = (1 << 10), + CMD_DESC_SNOOP_ENABLE = (1 << 9), + FPDMA_QUEUED_CMD = (1 << 8), + SRST_CMD = (1 << 7), + BIST = (1 << 6), + ATAPI_CMD = (1 << 5), +}; + +/* + * Command Descriptor + */ +struct command_desc { + u8 cfis[8 * 4]; + u8 sfis[8 * 4]; + u8 acmd[4 * 4]; + u8 fill[4 * 4]; + u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4]; + u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4]; +}; + +/* + * Physical region table descriptor(PRD) + */ + +struct prde { + u32 dba; + u8 fill[2 * 4]; + u32 ddc_and_ext; +}; + +/* + * ata_port private data + * This is our per-port instance data. + */ +struct sata_fsl_port_priv { + struct cmdhdr_tbl_entry *cmdslot; + dma_addr_t cmdslot_paddr; + struct command_desc *cmdentry; + dma_addr_t cmdentry_paddr; +}; + +/* + * ata_port->host_set private data + */ +struct sata_fsl_host_priv { + void __iomem *hcr_base; + void __iomem *ssr_base; + void __iomem *csr_base; + int irq; + int data_snoop; + struct device_attribute intr_coalescing; + struct device_attribute rx_watermark; +}; + +static void fsl_sata_set_irq_coalescing(struct ata_host *host, + unsigned int count, unsigned int ticks) +{ + struct sata_fsl_host_priv *host_priv = host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + unsigned long flags; + + if (count > ICC_MAX_INT_COUNT_THRESHOLD) + count = ICC_MAX_INT_COUNT_THRESHOLD; + else if (count < ICC_MIN_INT_COUNT_THRESHOLD) + count = ICC_MIN_INT_COUNT_THRESHOLD; + + if (ticks > ICC_MAX_INT_TICKS_THRESHOLD) + ticks = ICC_MAX_INT_TICKS_THRESHOLD; + else if ((ICC_MIN_INT_TICKS_THRESHOLD == ticks) && + (count > ICC_MIN_INT_COUNT_THRESHOLD)) + ticks = ICC_SAFE_INT_TICKS; + + spin_lock_irqsave(&host->lock, flags); + iowrite32((count << 24 | ticks), hcr_base + ICC); + + intr_coalescing_count = count; + intr_coalescing_ticks = ticks; + spin_unlock_irqrestore(&host->lock, flags); + + DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", + intr_coalescing_count, intr_coalescing_ticks); + DPRINTK("ICC register status: (hcr base: 0x%x) = 0x%x\n", + hcr_base, ioread32(hcr_base + ICC)); +} + +static ssize_t fsl_sata_intr_coalescing_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d %d\n", + intr_coalescing_count, intr_coalescing_ticks); +} + +static ssize_t fsl_sata_intr_coalescing_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int coalescing_count, coalescing_ticks; + + if (sscanf(buf, "%d%d", + &coalescing_count, + &coalescing_ticks) != 2) { + printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + return -EINVAL; + } + + fsl_sata_set_irq_coalescing(dev_get_drvdata(dev), + coalescing_count, coalescing_ticks); + + return strlen(buf); +} + +static ssize_t fsl_sata_rx_watermark_show(struct 
device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int rx_watermark; + unsigned long flags; + struct ata_host *host = dev_get_drvdata(dev); + struct sata_fsl_host_priv *host_priv = host->private_data; + void __iomem *csr_base = host_priv->csr_base; + + spin_lock_irqsave(&host->lock, flags); + rx_watermark = ioread32(csr_base + TRANSCFG); + rx_watermark &= 0x1f; + + spin_unlock_irqrestore(&host->lock, flags); + return sprintf(buf, "%d\n", rx_watermark); +} + +static ssize_t fsl_sata_rx_watermark_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int rx_watermark; + unsigned long flags; + struct ata_host *host = dev_get_drvdata(dev); + struct sata_fsl_host_priv *host_priv = host->private_data; + void __iomem *csr_base = host_priv->csr_base; + u32 temp; + + if (sscanf(buf, "%d", &rx_watermark) != 1) { + printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + return -EINVAL; + } + + spin_lock_irqsave(&host->lock, flags); + temp = ioread32(csr_base + TRANSCFG); + temp &= 0xffffffe0; + iowrite32(temp | rx_watermark, csr_base + TRANSCFG); + + spin_unlock_irqrestore(&host->lock, flags); + return strlen(buf); +} + +static inline unsigned int sata_fsl_tag(unsigned int tag, + void __iomem *hcr_base) +{ + /* We let libATA core do actual (queue) tag allocation */ + + /* all non NCQ/queued commands should have tag#0 */ + if (ata_tag_internal(tag)) { + DPRINTK("mapping internal cmds to tag#0\n"); + return 0; + } + + if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { + DPRINTK("tag %d invalid : out of range\n", tag); + return 0; + } + + if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) { + DPRINTK("tag %d invalid : in use!!\n", tag); + return 0; + } + + return tag; +} + +static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp, + unsigned int tag, u32 desc_info, + u32 data_xfer_len, u8 num_prde, + u8 fis_len) +{ + dma_addr_t cmd_descriptor_address; + + cmd_descriptor_address = pp->cmdentry_paddr + + tag * SATA_FSL_CMD_DESC_SIZE; + + /* NOTE: both data_xfer_len & fis_len are Dword counts */ + + pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address); + pp->cmdslot[tag].prde_fis_len = + cpu_to_le32((num_prde << 16) | (fis_len << 2)); + pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03); + pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F)); + + VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n", + pp->cmdslot[tag].cda, + pp->cmdslot[tag].prde_fis_len, + pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info); + +} + +static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, + u32 *ttl, dma_addr_t cmd_desc_paddr, + int data_snoop) +{ + struct scatterlist *sg; + unsigned int num_prde = 0; + u32 ttl_dwords = 0; + + /* + * NOTE : direct & indirect prdt's are contiguously allocated + */ + struct prde *prd = (struct prde *)&((struct command_desc *) + cmd_desc)->prdt; + + struct prde *prd_ptr_to_indirect_ext = NULL; + unsigned indirect_ext_segment_sz = 0; + dma_addr_t indirect_ext_segment_paddr; + unsigned int si; + + VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd); + + indirect_ext_segment_paddr = cmd_desc_paddr + + SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16; + + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_addr_t sg_addr = sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); + + VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n", + (unsigned long long)sg_addr, sg_len); + + /* warn if each s/g element is not dword aligned */ + if 
(unlikely(sg_addr & 0x03)) + ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n", + (unsigned long long)sg_addr); + if (unlikely(sg_len & 0x03)) + ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n", + sg_len); + + if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) && + sg_next(sg) != NULL) { + VPRINTK("setting indirect prde\n"); + prd_ptr_to_indirect_ext = prd; + prd->dba = cpu_to_le32(indirect_ext_segment_paddr); + indirect_ext_segment_sz = 0; + ++prd; + ++num_prde; + } + + ttl_dwords += sg_len; + prd->dba = cpu_to_le32(sg_addr); + prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03)); + + VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n", + ttl_dwords, prd->dba, prd->ddc_and_ext); + + ++num_prde; + ++prd; + if (prd_ptr_to_indirect_ext) + indirect_ext_segment_sz += sg_len; + } + + if (prd_ptr_to_indirect_ext) { + /* set indirect extension flag along with indirect ext. size */ + prd_ptr_to_indirect_ext->ddc_and_ext = + cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG | + data_snoop | + (indirect_ext_segment_sz & ~0x03))); + } + + *ttl = ttl_dwords; + return num_prde; +} + +static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct sata_fsl_port_priv *pp = ap->private_data; + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + struct command_desc *cd; + u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE; + u32 num_prde = 0; + u32 ttl_dwords = 0; + dma_addr_t cd_paddr; + + cd = (struct command_desc *)pp->cmdentry + tag; + cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE; + + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis); + + VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", + cd->cfis[0], cd->cfis[1], cd->cfis[2]); + + if (qc->tf.protocol == ATA_PROT_NCQ) { + VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n", + cd->cfis[3], cd->cfis[11]); + } + + /* setup "ACMD - atapi command" in cmd. desc. 
if this is ATAPI cmd */ + if (ata_is_atapi(qc->tf.protocol)) { + desc_info |= ATAPI_CMD; + memset((void *)&cd->acmd, 0, 32); + memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len); + } + + if (qc->flags & ATA_QCFLAG_DMAMAP) + num_prde = sata_fsl_fill_sg(qc, (void *)cd, + &ttl_dwords, cd_paddr, + host_priv->data_snoop); + + if (qc->tf.protocol == ATA_PROT_NCQ) + desc_info |= FPDMA_QUEUED_CMD; + + sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords, + num_prde, 5); + + VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", + desc_info, ttl_dwords, num_prde); +} + +static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + + VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n", + ioread32(CQ + hcr_base), + ioread32(CA + hcr_base), + ioread32(CE + hcr_base), ioread32(CC + hcr_base)); + + iowrite32(qc->dev->link->pmp, CQPMP + hcr_base); + + /* Simply queue command to the controller/device */ + iowrite32(1 << tag, CQ + hcr_base); + + VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n", + tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base)); + + VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n", + ioread32(CE + hcr_base), + ioread32(DE + hcr_base), + ioread32(CC + hcr_base), + ioread32(COMMANDSTAT + host_priv->csr_base)); + + return 0; +} + +static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct sata_fsl_port_priv *pp = qc->ap->private_data; + struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + struct command_desc *cd; + + cd = pp->cmdentry + tag; + + ata_tf_from_fis(cd->sfis, &qc->result_tf); + return true; +} + +static int sata_fsl_scr_write(struct ata_link *link, + unsigned int sc_reg_in, u32 val) +{ + struct sata_fsl_host_priv *host_priv = link->ap->host->private_data; + void __iomem *ssr_base = host_priv->ssr_base; + unsigned int sc_reg; + + switch (sc_reg_in) { + case SCR_STATUS: + case SCR_ERROR: + case SCR_CONTROL: + case SCR_ACTIVE: + sc_reg = sc_reg_in; + break; + default: + return -EINVAL; + } + + VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg); + + iowrite32(val, ssr_base + (sc_reg * 4)); + return 0; +} + +static int sata_fsl_scr_read(struct ata_link *link, + unsigned int sc_reg_in, u32 *val) +{ + struct sata_fsl_host_priv *host_priv = link->ap->host->private_data; + void __iomem *ssr_base = host_priv->ssr_base; + unsigned int sc_reg; + + switch (sc_reg_in) { + case SCR_STATUS: + case SCR_ERROR: + case SCR_CONTROL: + case SCR_ACTIVE: + sc_reg = sc_reg_in; + break; + default: + return -EINVAL; + } + + VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg); + + *val = ioread32(ssr_base + (sc_reg * 4)); + return 0; +} + +static void sata_fsl_freeze(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n", + ioread32(CQ + hcr_base), + ioread32(CA + hcr_base), + ioread32(CE + hcr_base), ioread32(DE + hcr_base)); + VPRINTK("CmdStat = 0x%x\n", + ioread32(host_priv->csr_base + COMMANDSTAT)); + + /* disable interrupts on the controller/port */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp & ~0x3F), hcr_base + HCONTROL); + + VPRINTK("in xx_freeze : HControl 
= 0x%x, HStatus = 0x%x\n", + ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); +} + +static void sata_fsl_thaw(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + /* ack. any pending IRQs for this controller/port */ + temp = ioread32(hcr_base + HSTATUS); + + VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F)); + + if (temp & 0x3F) + iowrite32((temp & 0x3F), hcr_base + HSTATUS); + + /* enable interrupts on the controller/port */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL); + + VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n", + ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); +} + +static void sata_fsl_pmp_attach(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL); +} + +static void sata_fsl_pmp_detach(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + temp = ioread32(hcr_base + HCONTROL); + temp &= ~HCONTROL_PMP_ATTACHED; + iowrite32(temp, hcr_base + HCONTROL); + + /* enable interrupts on the controller/port */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL); + +} + +static int sata_fsl_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host->dev; + struct sata_fsl_port_priv *pp; + void *mem; + dma_addr_t mem_dma; + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + pp = kzalloc(sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, + GFP_KERNEL); + if (!mem) { + kfree(pp); + return -ENOMEM; + } + memset(mem, 0, SATA_FSL_PORT_PRIV_DMA_SZ); + + pp->cmdslot = mem; + pp->cmdslot_paddr = mem_dma; + + mem += SATA_FSL_CMD_SLOT_SIZE; + mem_dma += SATA_FSL_CMD_SLOT_SIZE; + + pp->cmdentry = mem; + pp->cmdentry_paddr = mem_dma; + + ap->private_data = pp; + + VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n", + pp->cmdslot_paddr, pp->cmdentry_paddr); + + /* Now, update the CHBA register in host controller cmd register set */ + iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); + + /* + * Now, we can bring the controller on-line & also initiate + * the COMINIT sequence, we simply return here and the boot-probing + * & device discovery process is re-initiated by libATA using a + * Softreset EH (dummy) session. Hence, boot probing and device + * discovey will be part of sata_fsl_softreset() callback. 
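+	 *
+	 * Concretely, the write below sets HCONTROL_ONLINE_PHY_RST; the
+	 * controller is then expected to raise ONLINE in HStatus once PHY
+	 * communication comes up (compare the polling in
+	 * sata_fsl_hardreset()).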
+	 */
+
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
+
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+	VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
+
+	return 0;
+}
+
+static void sata_fsl_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	temp |= HCONTROL_FORCE_OFFLINE;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline - should happen immediately */
+	ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
+			  pp->cmdslot, pp->cmdslot_paddr);
+
+	kfree(pp);
+}
+
+static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	struct ata_taskfile tf;
+	u32 temp;
+
+	temp = ioread32(hcr_base + SIGNATURE);
+
+	VPRINTK("raw sig = 0x%x\n", temp);
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	tf.lbah = (temp >> 24) & 0xff;
+	tf.lbam = (temp >> 16) & 0xff;
+	tf.lbal = (temp >> 8) & 0xff;
+	tf.nsect = temp & 0xff;
+
+	return ata_dev_classify(&tf);
+}
+
+static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
+					unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+	int i = 0;
+	unsigned long start_jiffies;
+
+	DPRINTK("in xx_hardreset\n");
+
+try_offline_again:
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline */
+	temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE,
+				 1, 500);
+
+	if (temp & ONLINE) {
+		ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i);
+
+		/*
+		 * Try to take the controller off-line at least twice
+		 */
+		i++;
+		if (i == 2)
+			goto err;
+		else
+			goto try_offline_again;
+	}
+
+	DPRINTK("hardreset, controller off-lined\n");
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	/*
+	 * PHY reset should remain asserted for at least 1 ms
+	 */
+	ata_msleep(ap, 1);
+
+	/*
+	 * Now, bring the host controller online again; this can take time
+	 * as PHY reset, communication establishment, the first D2H FIS and
+	 * the device signature update must complete. To be safe, assume 500ms.
+	 * NOTE : Host online status may be indicated immediately!!
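+	 * (i.e. the ONLINE bit may already be set on the very first poll,
+	 * so a zero-wait return from ata_wait_register() below is normal.)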
+ */ + + temp = ioread32(hcr_base + HCONTROL); + temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE); + temp |= HCONTROL_PMP_ATTACHED; + iowrite32(temp, hcr_base + HCONTROL); + + temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500); + + if (!(temp & ONLINE)) { + ata_port_err(ap, "Hardreset failed, not on-lined\n"); + goto err; + } + + DPRINTK("hardreset, controller off-lined & on-lined\n"); + VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); + VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + + /* + * First, wait for the PHYRDY change to occur before waiting for + * the signature, and also verify if SStatus indicates device + * presence + */ + + temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500); + if ((!(temp & 0x10)) || ata_link_offline(link)) { + ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n", + ioread32(hcr_base + HSTATUS)); + *class = ATA_DEV_NONE; + return 0; + } + + /* + * Wait for the first D2H from device,i.e,signature update notification + */ + start_jiffies = jiffies; + temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10, + 500, jiffies_to_msecs(deadline - start_jiffies)); + + if ((temp & 0xFF) != 0x18) { + ata_port_warn(ap, "No Signature Update\n"); + *class = ATA_DEV_NONE; + goto do_followup_srst; + } else { + ata_port_info(ap, "Signature Update detected @ %d msecs\n", + jiffies_to_msecs(jiffies - start_jiffies)); + *class = sata_fsl_dev_classify(ap); + return 0; + } + +do_followup_srst: + /* + * request libATA to perform follow-up softreset + */ + return -EAGAIN; + +err: + return -EIO; +} + +static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct sata_fsl_port_priv *pp = ap->private_data; + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + int pmp = sata_srst_pmp(link); + u32 temp; + struct ata_taskfile tf; + u8 *cfis; + u32 Serror; + + DPRINTK("in xx_softreset\n"); + + if (ata_link_offline(link)) { + DPRINTK("PHY reports no device\n"); + *class = ATA_DEV_NONE; + return 0; + } + + /* + * Send a device reset (SRST) explicitly on command slot #0 + * Check : will the command queue (reg) be cleared during offlining ?? + * Also we will be online only if Phy commn. 
has been established + * and device presence has been detected, therefore if we have + * reached here, we can send a command to the target device + */ + + DPRINTK("Sending SRST/device reset\n"); + + ata_tf_init(link->device, &tf); + cfis = (u8 *) &pp->cmdentry->cfis; + + /* device reset/SRST is a control register update FIS, uses tag0 */ + sata_fsl_setup_cmd_hdr_entry(pp, 0, + SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); + + tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ + ata_tf_to_fis(&tf, pmp, 0, cfis); + + DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", + cfis[0], cfis[1], cfis[2], cfis[3]); + + /* + * Queue SRST command to the controller/device, ensure that no + * other commands are active on the controller/device + */ + + DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n", + ioread32(CQ + hcr_base), + ioread32(CA + hcr_base), ioread32(CC + hcr_base)); + + iowrite32(0xFFFF, CC + hcr_base); + if (pmp != SATA_PMP_CTRL_PORT) + iowrite32(pmp, CQPMP + hcr_base); + iowrite32(1, CQ + hcr_base); + + temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000); + if (temp & 0x1) { + ata_port_warn(ap, "ATA_SRST issue failed\n"); + + DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n", + ioread32(CQ + hcr_base), + ioread32(CA + hcr_base), ioread32(CC + hcr_base)); + + sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror); + + DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); + DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + DPRINTK("Serror = 0x%x\n", Serror); + goto err; + } + + ata_msleep(ap, 1); + + /* + * SATA device enters reset state after receiving a Control register + * FIS with SRST bit asserted and it awaits another H2D Control reg. + * FIS with SRST bit cleared, then the device does internal diags & + * initialization, followed by indicating it's initialization status + * using ATA signature D2H register FIS to the host controller. + */ + + sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, + 0, 0, 5); + + tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ + ata_tf_to_fis(&tf, pmp, 0, cfis); + + if (pmp != SATA_PMP_CTRL_PORT) + iowrite32(pmp, CQPMP + hcr_base); + iowrite32(1, CQ + hcr_base); + ata_msleep(ap, 150); /* ?? */ + + /* + * The above command would have signalled an interrupt on command + * complete, which needs special handling, by clearing the Nth + * command bit of the CCreg + */ + iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */ + + DPRINTK("SATA FSL : Now checking device signature\n"); + + *class = ATA_DEV_NONE; + + /* Verify if SStatus indicates device presence */ + if (ata_link_online(link)) { + /* + * if we are here, device presence has been detected, + * 1st D2H FIS would have been received, but sfis in + * command desc. 
is not updated, but signature register + * would have been updated + */ + + *class = sata_fsl_dev_classify(ap); + + DPRINTK("class = %d\n", *class); + VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC)); + VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); + } + + return 0; + +err: + return -EIO; +} + +static void sata_fsl_error_handler(struct ata_port *ap) +{ + + DPRINTK("in xx_error_handler\n"); + sata_pmp_error_handler(ap); + +} + +static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) +{ + if (qc->flags & ATA_QCFLAG_FAILED) + qc->err_mask |= AC_ERR_OTHER; + + if (qc->err_mask) { + /* make DMA engine forget about the failed command */ + + } +} + +static void sata_fsl_error_intr(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 hstatus, dereg=0, cereg = 0, SError = 0; + unsigned int err_mask = 0, action = 0; + int freeze = 0, abort=0; + struct ata_link *link = NULL; + struct ata_queued_cmd *qc = NULL; + struct ata_eh_info *ehi; + + hstatus = ioread32(hcr_base + HSTATUS); + cereg = ioread32(hcr_base + CE); + + /* first, analyze and record host port events */ + link = &ap->link; + ehi = &link->eh_info; + ata_ehi_clear_desc(ehi); + + /* + * Handle & Clear SError + */ + + sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError); + if (unlikely(SError & 0xFFFF0000)) + sata_fsl_scr_write(&ap->link, SCR_ERROR, SError); + + DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", + hstatus, cereg, ioread32(hcr_base + DE), SError); + + /* handle fatal errors */ + if (hstatus & FATAL_ERROR_DECODE) { + ehi->err_mask |= AC_ERR_ATA_BUS; + ehi->action |= ATA_EH_SOFTRESET; + + freeze = 1; + } + + /* Handle SDB FIS receive & notify update */ + if (hstatus & INT_ON_SNOTIFY_UPDATE) + sata_async_notification(ap); + + /* Handle PHYRDY change notification */ + if (hstatus & INT_ON_PHYRDY_CHG) { + DPRINTK("SATA FSL: PHYRDY change indication\n"); + + /* Setup a soft-reset EH action */ + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "%s", "PHY RDY changed"); + freeze = 1; + } + + /* handle single device errors */ + if (cereg) { + /* + * clear the command error, also clears queue to the device + * in error, and we can (re)issue commands to this device. + * When a device is in error all commands queued into the + * host controller and at the device are considered aborted + * and the queue for that device is stopped. Now, after + * clearing the device error, we can issue commands to the + * device to interrogate it to find the source of the error. + */ + abort = 1; + + DPRINTK("single device error, CE=0x%x, DE=0x%x\n", + ioread32(hcr_base + CE), ioread32(hcr_base + DE)); + + /* find out the offending link and qc */ + if (ap->nr_pmp_links) { + unsigned int dev_num; + + dereg = ioread32(hcr_base + DE); + iowrite32(dereg, hcr_base + DE); + iowrite32(cereg, hcr_base + CE); + + dev_num = ffs(dereg) - 1; + if (dev_num < ap->nr_pmp_links && dereg != 0) { + link = &ap->pmp_link[dev_num]; + ehi = &link->eh_info; + qc = ata_qc_from_tag(ap, link->active_tag); + /* + * We should consider this as non fatal error, + * and TF must be updated as done below. 
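+				 * (AC_ERR_DEV leaves the port running so
+				 * that EH can interrogate the device and
+				 * re-issue the remaining commands, unlike
+				 * the AC_ERR_HSM case which freezes the
+				 * port.)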
+ */ + + err_mask |= AC_ERR_DEV; + + } else { + err_mask |= AC_ERR_HSM; + action |= ATA_EH_HARDRESET; + freeze = 1; + } + } else { + dereg = ioread32(hcr_base + DE); + iowrite32(dereg, hcr_base + DE); + iowrite32(cereg, hcr_base + CE); + + qc = ata_qc_from_tag(ap, link->active_tag); + /* + * We should consider this as non fatal error, + * and TF must be updated as done below. + */ + err_mask |= AC_ERR_DEV; + } + } + + /* record error info */ + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + ehi->action |= action; + + /* freeze or abort */ + if (freeze) + ata_port_freeze(ap); + else if (abort) { + if (qc) + ata_link_abort(qc->dev->link); + else + ata_port_abort(ap); + } +} + +static void sata_fsl_host_intr(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 hstatus, done_mask = 0; + struct ata_queued_cmd *qc; + u32 SError; + u32 tag; + u32 status_mask = INT_ON_ERROR; + + hstatus = ioread32(hcr_base + HSTATUS); + + sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError); + + /* Read command completed register */ + done_mask = ioread32(hcr_base + CC); + + /* Workaround for data length mismatch errata */ + if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) { + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { + qc = ata_qc_from_tag(ap, tag); + if (qc && ata_is_atapi(qc->tf.protocol)) { + u32 hcontrol; + /* Set HControl[27] to clear error registers */ + hcontrol = ioread32(hcr_base + HCONTROL); + iowrite32(hcontrol | CLEAR_ERROR, + hcr_base + HCONTROL); + + /* Clear HControl[27] */ + iowrite32(hcontrol & ~CLEAR_ERROR, + hcr_base + HCONTROL); + + /* Clear SError[E] bit */ + sata_fsl_scr_write(&ap->link, SCR_ERROR, + SError); + + /* Ignore fatal error and device error */ + status_mask &= ~(INT_ON_SINGL_DEVICE_ERR + | INT_ON_FATAL_ERR); + break; + } + } + } + + if (unlikely(SError & 0xFFFF0000)) { + DPRINTK("serror @host_intr : 0x%x\n", SError); + sata_fsl_error_intr(ap); + } + + if (unlikely(hstatus & status_mask)) { + DPRINTK("error interrupt!!\n"); + sata_fsl_error_intr(ap); + return; + } + + VPRINTK("Status of all queues :\n"); + VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n", + done_mask, + ioread32(hcr_base + CA), + ioread32(hcr_base + CE), + ioread32(hcr_base + CQ), + ap->qc_active); + + if (done_mask & ap->qc_active) { + int i; + /* clear CC bit, this will also complete the interrupt */ + iowrite32(done_mask, hcr_base + CC); + + DPRINTK("Status of all queues :\n"); + DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n", + done_mask, ioread32(hcr_base + CA), + ioread32(hcr_base + CE)); + + for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { + if (done_mask & (1 << i)) + DPRINTK + ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", + i, ioread32(hcr_base + CC), + ioread32(hcr_base + CA)); + } + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + return; + + } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { + iowrite32(1, hcr_base + CC); + qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); + + DPRINTK("completing non-ncq cmd, CC=0x%x\n", + ioread32(hcr_base + CC)); + + if (qc) { + ata_qc_complete(qc); + } + } else { + /* Spurious Interrupt!! 
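(no NCQ completion and no internal command pending; just ack the stale CC bits)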
*/ + DPRINTK("spurious interrupt!!, CC = 0x%x\n", + ioread32(hcr_base + CC)); + iowrite32(done_mask, hcr_base + CC); + return; + } +} + +static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct sata_fsl_host_priv *host_priv = host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 interrupt_enables; + unsigned handled = 0; + struct ata_port *ap; + + /* ack. any pending IRQs for this controller/port */ + interrupt_enables = ioread32(hcr_base + HSTATUS); + interrupt_enables &= 0x3F; + + DPRINTK("interrupt status 0x%x\n", interrupt_enables); + + if (!interrupt_enables) + return IRQ_NONE; + + spin_lock(&host->lock); + + /* Assuming one port per host controller */ + + ap = host->ports[0]; + if (ap) { + sata_fsl_host_intr(ap); + } else { + dev_warn(host->dev, "interrupt on disabled port 0\n"); + } + + iowrite32(interrupt_enables, hcr_base + HSTATUS); + handled = 1; + + spin_unlock(&host->lock); + + return IRQ_RETVAL(handled); +} + +/* + * Multiple ports are represented by multiple SATA controllers with + * one port per controller + */ +static int sata_fsl_init_controller(struct ata_host *host) +{ + struct sata_fsl_host_priv *host_priv = host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + /* + * NOTE : We cannot bring the controller online before setting + * the CHBA, hence main controller initialization is done as + * part of the port_start() callback + */ + + /* sata controller to operate in enterprise mode */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL); + + /* ack. any pending IRQs for this controller/port */ + temp = ioread32(hcr_base + HSTATUS); + if (temp & 0x3F) + iowrite32((temp & 0x3F), hcr_base + HSTATUS); + + /* Keep interrupts disabled on the controller */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp & ~0x3F), hcr_base + HCONTROL); + + /* Disable interrupt coalescing control(icc), for the moment */ + DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC)); + iowrite32(0x01000000, hcr_base + ICC); + + /* clear error registers, SError is cleared by libATA */ + iowrite32(0x00000FFFF, hcr_base + CE); + iowrite32(0x00000FFFF, hcr_base + DE); + + /* + * reset the number of command complete bits which will cause the + * interrupt to be signaled + */ + fsl_sata_set_irq_coalescing(host, intr_coalescing_count, + intr_coalescing_ticks); + + /* + * host controller will be brought on-line, during xx_port_start() + * callback, that should also initiate the OOB, COMINIT sequence + */ + + DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); + DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); + + return 0; +} + +/* + * scsi mid-layer and libata interface structures + */ +static struct scsi_host_template sata_fsl_sht = { + ATA_NCQ_SHT("sata_fsl"), + .can_queue = SATA_FSL_QUEUE_DEPTH, + .sg_tablesize = SATA_FSL_MAX_PRD_USABLE, + .dma_boundary = ATA_DMA_BOUNDARY, +}; + +static struct ata_port_operations sata_fsl_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = ata_std_qc_defer, + .qc_prep = sata_fsl_qc_prep, + .qc_issue = sata_fsl_qc_issue, + .qc_fill_rtf = sata_fsl_qc_fill_rtf, + + .scr_read = sata_fsl_scr_read, + .scr_write = sata_fsl_scr_write, + + .freeze = sata_fsl_freeze, + .thaw = sata_fsl_thaw, + .softreset = sata_fsl_softreset, + .hardreset = sata_fsl_hardreset, + .pmp_softreset = sata_fsl_softreset, + .error_handler = sata_fsl_error_handler, + .post_internal_cmd = sata_fsl_post_internal_cmd, + + 
.port_start = sata_fsl_port_start,
+	.port_stop = sata_fsl_port_stop,
+
+	.pmp_attach = sata_fsl_pmp_attach,
+	.pmp_detach = sata_fsl_pmp_detach,
+};
+
+static const struct ata_port_info sata_fsl_port_info[] = {
+	{
+	 .flags = SATA_FSL_HOST_FLAGS,
+	 .pio_mask = ATA_PIO4,
+	 .udma_mask = ATA_UDMA6,
+	 .port_ops = &sata_fsl_ops,
+	 },
+};
+
+static int sata_fsl_probe(struct platform_device *ofdev)
+{
+	int retval = -ENXIO;
+	void __iomem *hcr_base = NULL;
+	void __iomem *ssr_base = NULL;
+	void __iomem *csr_base = NULL;
+	struct sata_fsl_host_priv *host_priv = NULL;
+	int irq;
+	struct ata_host *host = NULL;
+	u32 temp;
+
+	struct ata_port_info pi = sata_fsl_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	dev_info(&ofdev->dev, "SATA FSL Platform/CSB Driver init\n");
+
+	hcr_base = of_iomap(ofdev->dev.of_node, 0);
+	if (!hcr_base)
+		goto error_exit_with_cleanup;
+
+	ssr_base = hcr_base + 0x100;
+	csr_base = hcr_base + 0x140;
+
+	if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
+		temp = ioread32(csr_base + TRANSCFG);
+		temp = temp & 0xffffffe0;
+		iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
+	}
+
+	DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
+	DPRINTK("sizeof(cmd_desc) = %zu\n", sizeof(struct command_desc));
+	DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+
+	host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
+	if (!host_priv)
+		goto error_exit_with_cleanup;
+
+	host_priv->hcr_base = hcr_base;
+	host_priv->ssr_base = ssr_base;
+	host_priv->csr_base = csr_base;
+
+	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+	if (!irq) { /* irq_of_parse_and_map() returns 0, not a negative errno, on failure */
+		dev_err(&ofdev->dev, "invalid irq from platform\n");
+		retval = -EINVAL;
+		goto error_exit_with_cleanup;
+	}
+	host_priv->irq = irq;
+
+	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2"))
+		host_priv->data_snoop = DATA_SNOOP_ENABLE_V2;
+	else
+		host_priv->data_snoop = DATA_SNOOP_ENABLE_V1;
+
+	/* allocate host structure */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
+	if (!host) {
+		retval = -ENOMEM;
+		goto error_exit_with_cleanup;
+	}
+
+	/* host->iomap is not used currently */
+	host->private_data = host_priv;
+
+	/* initialize host controller */
+	sata_fsl_init_controller(host);
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
+			  &sata_fsl_sht);
+
+	platform_set_drvdata(ofdev, host);
+
+	host_priv->intr_coalescing.show = fsl_sata_intr_coalescing_show;
+	host_priv->intr_coalescing.store = fsl_sata_intr_coalescing_store;
+	sysfs_attr_init(&host_priv->intr_coalescing.attr);
+	host_priv->intr_coalescing.attr.name = "intr_coalescing";
+	host_priv->intr_coalescing.attr.mode = S_IRUGO | S_IWUSR;
+	retval = device_create_file(host->dev, &host_priv->intr_coalescing);
+	if (retval)
+		goto error_exit_with_cleanup;
+
+	host_priv->rx_watermark.show = fsl_sata_rx_watermark_show;
+	host_priv->rx_watermark.store = fsl_sata_rx_watermark_store;
+	sysfs_attr_init(&host_priv->rx_watermark.attr);
+	host_priv->rx_watermark.attr.name = "rx_watermark";
+	host_priv->rx_watermark.attr.mode = S_IRUGO | S_IWUSR;
+	retval = device_create_file(host->dev, &host_priv->rx_watermark);
+	if (retval) {
+		device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
+		goto error_exit_with_cleanup;
+	}
+
+	return 0;
+
+error_exit_with_cleanup:
+
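+	/* common unwind path for the failure cases above */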
+ if (host) + ata_host_detach(host); + + if (hcr_base) + iounmap(hcr_base); + kfree(host_priv); + + return retval; +} + +static int sata_fsl_remove(struct platform_device *ofdev) +{ + struct ata_host *host = platform_get_drvdata(ofdev); + struct sata_fsl_host_priv *host_priv = host->private_data; + + device_remove_file(&ofdev->dev, &host_priv->intr_coalescing); + device_remove_file(&ofdev->dev, &host_priv->rx_watermark); + + ata_host_detach(host); + + irq_dispose_mapping(host_priv->irq); + iounmap(host_priv->hcr_base); + kfree(host_priv); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sata_fsl_suspend(struct platform_device *op, pm_message_t state) +{ + struct ata_host *host = platform_get_drvdata(op); + return ata_host_suspend(host, state); +} + +static int sata_fsl_resume(struct platform_device *op) +{ + struct ata_host *host = platform_get_drvdata(op); + struct sata_fsl_host_priv *host_priv = host->private_data; + int ret; + void __iomem *hcr_base = host_priv->hcr_base; + struct ata_port *ap = host->ports[0]; + struct sata_fsl_port_priv *pp = ap->private_data; + + ret = sata_fsl_init_controller(host); + if (ret) { + dev_err(&op->dev, "Error initializing hardware\n"); + return ret; + } + + /* Recovery the CHBA register in host controller cmd register set */ + iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); + + iowrite32((ioread32(hcr_base + HCONTROL) + | HCONTROL_ONLINE_PHY_RST + | HCONTROL_SNOOP_ENABLE + | HCONTROL_PMP_ATTACHED), + hcr_base + HCONTROL); + + ata_host_resume(host); + return 0; +} +#endif + +static struct of_device_id fsl_sata_match[] = { + { + .compatible = "fsl,pq-sata", + }, + { + .compatible = "fsl,pq-sata-v2", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, fsl_sata_match); + +static struct platform_driver fsl_sata_driver = { + .driver = { + .name = "fsl-sata", + .owner = THIS_MODULE, + .of_match_table = fsl_sata_match, + }, + .probe = sata_fsl_probe, + .remove = sata_fsl_remove, +#ifdef CONFIG_PM_SLEEP + .suspend = sata_fsl_suspend, + .resume = sata_fsl_resume, +#endif +}; + +module_platform_driver(fsl_sata_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor"); +MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver"); +MODULE_VERSION("1.10"); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c new file mode 100644 index 00000000000..65965cf5af0 --- /dev/null +++ b/drivers/ata/sata_highbank.c @@ -0,0 +1,650 @@ +/* + * Calxeda Highbank AHCI SATA platform driver + * Copyright 2012 Calxeda, Inc. + * + * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> + +#include "ahci.h" + +#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f)) +#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2) +#define SERDES_CR_CTL 0x80a0 +#define SERDES_CR_ADDR 0x80a1 +#define SERDES_CR_DATA 0x80a2 +#define CR_BUSY 0x0001 +#define CR_START 0x0001 +#define CR_WR_RDN 0x0002 +#define CPHY_TX_INPUT_STS 0x2001 +#define CPHY_RX_INPUT_STS 0x2002 +#define CPHY_SATA_TX_OVERRIDE 0x8000 +#define CPHY_SATA_RX_OVERRIDE 0x4000 +#define CPHY_TX_OVERRIDE 0x2004 +#define CPHY_RX_OVERRIDE 0x2005 +#define SPHY_LANE 0x100 +#define SPHY_HALF_RATE 0x0001 +#define CPHY_SATA_DPLL_MODE 0x0700 +#define CPHY_SATA_DPLL_SHIFT 8 +#define CPHY_SATA_DPLL_RESET (1 << 11) +#define CPHY_SATA_TX_ATTEN 0x1c00 +#define CPHY_SATA_TX_ATTEN_SHIFT 10 +#define CPHY_PHY_COUNT 6 +#define CPHY_LANE_COUNT 4 +#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT) + +static DEFINE_SPINLOCK(cphy_lock); +/* Each of the 6 phys can have up to 4 sata ports attached to i. Map 0-based + * sata ports to their phys and then to their lanes within the phys + */ +struct phy_lane_info { + void __iomem *phy_base; + u8 lane_mapping; + u8 phy_devs; + u8 tx_atten; +}; +static struct phy_lane_info port_data[CPHY_PORT_COUNT]; + +static DEFINE_SPINLOCK(sgpio_lock); +#define SCLOCK 0 +#define SLOAD 1 +#define SDATA 2 +#define SGPIO_PINS 3 +#define SGPIO_PORTS 8 + +struct ecx_plat_data { + u32 n_ports; + /* number of extra clocks that the SGPIO PIC controller expects */ + u32 pre_clocks; + u32 post_clocks; + unsigned sgpio_gpio[SGPIO_PINS]; + u32 sgpio_pattern; + u32 port_to_sgpio[SGPIO_PORTS]; +}; + +#define SGPIO_SIGNALS 3 +#define ECX_ACTIVITY_BITS 0x300000 +#define ECX_ACTIVITY_SHIFT 0 +#define ECX_LOCATE_BITS 0x80000 +#define ECX_LOCATE_SHIFT 1 +#define ECX_FAULT_BITS 0x400000 +#define ECX_FAULT_SHIFT 2 +static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, + u32 shift) +{ + return 1 << (3 * pdata->port_to_sgpio[port] + shift); +} + +static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state) +{ + if (state & ECX_ACTIVITY_BITS) + pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, + ECX_ACTIVITY_SHIFT); + else + pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, + ECX_ACTIVITY_SHIFT); + if (state & ECX_LOCATE_BITS) + pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, + ECX_LOCATE_SHIFT); + else + pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, + ECX_LOCATE_SHIFT); + if (state & ECX_FAULT_BITS) + pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port, + ECX_FAULT_SHIFT); + else + pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port, + ECX_FAULT_SHIFT); +} + +/* + * Tell the LED controller that the signal has changed by raising the clock + * line for 50 uS and then lowering it for 50 uS. 
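+ * (100 us per full period works out to roughly a 10 kHz bit clock.)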
+ */ +static void ecx_led_cycle_clock(struct ecx_plat_data *pdata) +{ + gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1); + udelay(50); + gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0); + udelay(50); +} + +static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ecx_plat_data *pdata = hpriv->plat_data; + struct ahci_port_priv *pp = ap->private_data; + unsigned long flags; + int pmp, i; + struct ahci_em_priv *emp; + u32 sgpio_out; + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp < EM_MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED)) + return size; + + spin_lock_irqsave(&sgpio_lock, flags); + ecx_parse_sgpio(pdata, ap->port_no, state); + sgpio_out = pdata->sgpio_pattern; + for (i = 0; i < pdata->pre_clocks; i++) + ecx_led_cycle_clock(pdata); + + gpio_set_value(pdata->sgpio_gpio[SLOAD], 1); + ecx_led_cycle_clock(pdata); + gpio_set_value(pdata->sgpio_gpio[SLOAD], 0); + /* + * bit-bang out the SGPIO pattern, by consuming a bit and then + * clocking it out. + */ + for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) { + gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1); + sgpio_out >>= 1; + ecx_led_cycle_clock(pdata); + } + for (i = 0; i < pdata->post_clocks; i++) + ecx_led_cycle_clock(pdata); + + /* save off new led state for port/slot */ + emp->led_state = state; + + spin_unlock_irqrestore(&sgpio_lock, flags); + return size; +} + +static void highbank_set_em_messages(struct device *dev, + struct ahci_host_priv *hpriv, + struct ata_port_info *pi) +{ + struct device_node *np = dev->of_node; + struct ecx_plat_data *pdata = hpriv->plat_data; + int i; + int err; + + for (i = 0; i < SGPIO_PINS; i++) { + err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); + if (IS_ERR_VALUE(err)) + return; + + pdata->sgpio_gpio[i] = err; + err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO"); + if (err) { + pr_err("sata_highbank gpio_request %d failed: %d\n", + i, err); + return; + } + gpio_direction_output(pdata->sgpio_gpio[i], 1); + } + of_property_read_u32_array(np, "calxeda,led-order", + pdata->port_to_sgpio, + pdata->n_ports); + if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks)) + pdata->pre_clocks = 0; + if (of_property_read_u32(np, "calxeda,post-clocks", + &pdata->post_clocks)) + pdata->post_clocks = 0; + + /* store em_loc */ + hpriv->em_loc = 0; + hpriv->em_buf_sz = 4; + hpriv->em_msg_type = EM_MSG_TYPE_LED; + pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY; +} + +static u32 __combo_phy_reg_read(u8 sata_port, u32 addr) +{ + u32 data; + u8 dev = port_data[sata_port].phy_devs; + spin_lock(&cphy_lock); + writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); + data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr)); + spin_unlock(&cphy_lock); + return data; +} + +static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data) +{ + u8 dev = port_data[sata_port].phy_devs; + spin_lock(&cphy_lock); + writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); + writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr)); + spin_unlock(&cphy_lock); +} + +static void combo_phy_wait_for_ready(u8 sata_port) +{ + while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY) + udelay(5); +} + +static u32 combo_phy_read(u8 sata_port, u32 addr) +{ + combo_phy_wait_for_ready(sata_port); + __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); + 
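/* the target address is latched; writing CR_START triggers the read */
+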
__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START); + combo_phy_wait_for_ready(sata_port); + return __combo_phy_reg_read(sata_port, SERDES_CR_DATA); +} + +static void combo_phy_write(u8 sata_port, u32 addr, u32 data) +{ + combo_phy_wait_for_ready(sata_port); + __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); + __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data); + __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START); +} + +static void highbank_cphy_disable_overrides(u8 sata_port) +{ + u8 lane = port_data[sata_port].lane_mapping; + u32 tmp; + if (unlikely(port_data[sata_port].phy_base == NULL)) + return; + tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); + tmp &= ~CPHY_SATA_RX_OVERRIDE; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); +} + +static void cphy_override_tx_attenuation(u8 sata_port, u32 val) +{ + u8 lane = port_data[sata_port].lane_mapping; + u32 tmp; + + if (val & 0x8) + return; + + tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE); + tmp &= ~CPHY_SATA_TX_OVERRIDE; + combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp |= CPHY_SATA_TX_OVERRIDE; + combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN; + combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp); +} + +static void cphy_override_rx_mode(u8 sata_port, u32 val) +{ + u8 lane = port_data[sata_port].lane_mapping; + u32 tmp; + tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); + tmp &= ~CPHY_SATA_RX_OVERRIDE; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp |= CPHY_SATA_RX_OVERRIDE; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp &= ~CPHY_SATA_DPLL_MODE; + tmp |= val << CPHY_SATA_DPLL_SHIFT; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp |= CPHY_SATA_DPLL_RESET; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); + + tmp &= ~CPHY_SATA_DPLL_RESET; + combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp); + + msleep(15); +} + +static void highbank_cphy_override_lane(u8 sata_port) +{ + u8 lane = port_data[sata_port].lane_mapping; + u32 tmp, k = 0; + + if (unlikely(port_data[sata_port].phy_base == NULL)) + return; + do { + tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + + lane * SPHY_LANE); + } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000)); + cphy_override_rx_mode(sata_port, 3); + cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten); +} + +static int highbank_initialize_phys(struct device *dev, void __iomem *addr) +{ + struct device_node *sata_node = dev->of_node; + int phy_count = 0, phy, port = 0, i; + void __iomem *cphy_base[CPHY_PHY_COUNT] = {}; + struct device_node *phy_nodes[CPHY_PHY_COUNT] = {}; + u32 tx_atten[CPHY_PORT_COUNT] = {}; + + memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT); + + do { + u32 tmp; + struct of_phandle_args phy_data; + if (of_parse_phandle_with_args(sata_node, + "calxeda,port-phys", "#phy-cells", + port, &phy_data)) + break; + for (phy = 0; phy < phy_count; phy++) { + if (phy_nodes[phy] == phy_data.np) + break; + } + if (phy_nodes[phy] == NULL) { + phy_nodes[phy] = phy_data.np; + cphy_base[phy] = of_iomap(phy_nodes[phy], 0); + if (cphy_base[phy] == NULL) { + return 0; + } + phy_count += 1; + } + port_data[port].lane_mapping = phy_data.args[0]; + of_property_read_u32(phy_nodes[phy], "phydev", 
&tmp); + port_data[port].phy_devs = tmp; + port_data[port].phy_base = cphy_base[phy]; + of_node_put(phy_data.np); + port += 1; + } while (port < CPHY_PORT_COUNT); + of_property_read_u32_array(sata_node, "calxeda,tx-atten", + tx_atten, port); + for (i = 0; i < port; i++) + port_data[i].tx_atten = (u8) tx_atten[i]; + return 0; +} + +/* + * The Calxeda SATA phy intermittently fails to bring up a link with Gen3 + * Retrying the phy hard reset can work around the issue, but the drive + * may fail again. In less than 150 out of 15000 test runs, it took more + * than 10 tries for the link to be established (but never more than 35). + * Triple the maximum observed retry count to provide plenty of margin for + * rare events and to guarantee that the link is established. + * + * Also, the default 2 second time-out on a failed drive is too long in + * this situation. The uboot implementation of the same driver function + * uses a much shorter time-out period and never experiences a time out + * issue. Reducing the time-out to 500ms improves the responsiveness. + * The other timing constants were kept the same as the stock AHCI driver. + * This change was also tested 15000 times on 24 drives and none of them + * experienced a time out. + */ +static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + static const unsigned long timing[] = { 5, 100, 500}; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + struct ata_taskfile tf; + bool online; + u32 sstatus; + int rc; + int retry = 100; + + ahci_stop_engine(ap); + + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + do { + highbank_cphy_disable_overrides(link->ap->port_no); + rc = sata_link_hardreset(link, timing, deadline, &online, NULL); + highbank_cphy_override_lane(link->ap->port_no); + + /* If the status is 1, we are connected, but the link did not + * come up. So retry resetting the link again. 
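+	 * (An SStatus DET field of 1 means a device was sensed but phy
+	 * communication was never established, which is the stuck state
+	 * this loop retries.)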
+ */ + if (sata_scr_read(link, SCR_STATUS, &sstatus)) + break; + if (!(sstatus & 0x3)) + break; + } while (!online && retry--); + + hpriv->start_engine(ap); + + if (online) + *class = ahci_dev_classify(ap); + + return rc; +} + +static struct ata_port_operations ahci_highbank_ops = { + .inherits = &ahci_ops, + .hardreset = ahci_highbank_hardreset, + .transmit_led_message = ecx_transmit_led_message, +}; + +static const struct ata_port_info ahci_highbank_port_info = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_highbank_ops, +}; + +static struct scsi_host_template ahci_highbank_platform_sht = { + AHCI_SHT("sata_highbank"), +}; + +static const struct of_device_id ahci_of_match[] = { + { .compatible = "calxeda,hb-ahci" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ahci_of_match); + +static int ahci_highbank_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct ecx_plat_data *pdata; + struct ata_host *host; + struct resource *mem; + int irq; + int i; + int rc; + u32 n_ports; + struct ata_port_info pi = ahci_highbank_port_info; + const struct ata_port_info *ppi[] = { &pi, NULL }; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "no mmio space\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "no irq\n"); + return -EINVAL; + } + + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) { + dev_err(dev, "can't alloc ahci_host_priv\n"); + return -ENOMEM; + } + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(dev, "can't alloc ecx_plat_data\n"); + return -ENOMEM; + } + + hpriv->flags |= (unsigned long)pi.private_data; + + hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); + if (!hpriv->mmio) { + dev_err(dev, "can't map %pR\n", mem); + return -ENOMEM; + } + + rc = highbank_initialize_phys(dev, hpriv->mmio); + if (rc) + return rc; + + + ahci_save_initial_config(dev, hpriv, 0, 0); + + /* prepare host */ + if (hpriv->cap & HOST_CAP_NCQ) + pi.flags |= ATA_FLAG_NCQ; + + if (hpriv->cap & HOST_CAP_PMP) + pi.flags |= ATA_FLAG_PMP; + + if (hpriv->cap & HOST_CAP_64) + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + /* CAP.NP sometimes indicate the index of the last enabled + * port, at other times, that of the last possible port, so + * determining the maximum port number requires looking at + * both CAP.NP and port_map. 
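+	 * (Hence the max() below; e.g. CAP.NP == 3 with port_map == 0x33
+	 * gives max(4, 6) = 6 ports.)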
+ */ + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); + + pdata->n_ports = n_ports; + hpriv->plat_data = pdata; + highbank_set_em_messages(dev, hpriv, &pi); + + host = ata_host_alloc_pinfo(dev, ppi, n_ports); + if (!host) { + rc = -ENOMEM; + goto err0; + } + + host->private_data = hpriv; + + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) + host->flags |= ATA_HOST_PARALLEL_SCAN; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ata_port_desc(ap, "mmio %pR", mem); + ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); + + /* set enclosure management message type */ + if (ap->flags & ATA_FLAG_EM) + ap->em_message_type = hpriv->em_msg_type; + + /* disabled/not-implemented port */ + if (!(hpriv->port_map & (1 << i))) + ap->ops = &ata_dummy_port_ops; + } + + rc = ahci_reset_controller(host); + if (rc) + goto err0; + + ahci_init_controller(host); + ahci_print_info(host, "platform"); + + rc = ata_host_activate(host, irq, ahci_interrupt, 0, + &ahci_highbank_platform_sht); + if (rc) + goto err0; + + return 0; +err0: + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static int ahci_highbank_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 ctl; + int rc; + + if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { + dev_err(dev, "firmware update required for suspend/resume\n"); + return -EIO; + } + + /* + * AHCI spec rev1.1 section 8.3.3: + * Software must disable interrupts prior to requesting a + * transition of the HBA to D3 state. + */ + ctl = readl(mmio + HOST_CTL); + ctl &= ~HOST_IRQ_EN; + writel(ctl, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + + rc = ata_host_suspend(host, PMSG_SUSPEND); + if (rc) + return rc; + + return 0; +} + +static int ahci_highbank_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + int rc; + + if (dev->power.power_state.event == PM_EVENT_SUSPEND) { + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + } + + ata_host_resume(host); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops, + ahci_highbank_suspend, ahci_highbank_resume); + +static struct platform_driver ahci_highbank_driver = { + .remove = ata_platform_remove_one, + .driver = { + .name = "highbank-ahci", + .owner = THIS_MODULE, + .of_match_table = ahci_of_match, + .pm = &ahci_highbank_pm_ops, + }, + .probe = ahci_highbank_probe, +}; + +module_platform_driver(ahci_highbank_driver); + +MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver"); +MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("sata:highbank"); diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c new file mode 100644 index 00000000000..069827826b2 --- /dev/null +++ b/drivers/ata/sata_inic162x.c @@ -0,0 +1,915 @@ +/* + * sata_inic162x.c - Driver for Initio 162x SATA controllers + * + * Copyright 2006 SUSE Linux Products GmbH + * Copyright 2006 Tejun Heo <teheo@novell.com> + * + * This file is released under GPL v2. + * + * **** WARNING **** + * + * This driver never worked properly and unfortunately data corruption is + * relatively common. There isn't anyone working on the driver and there's + * no support from the vendor. Do not use this driver in any production + * environment. 
+ * + * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491 + * https://bugzilla.kernel.org/show_bug.cgi?id=60565 + * + * ***************** + * + * This controller is eccentric and easily locks up if something isn't + * right. Documentation is available at initio's website but it only + * documents registers (not programming model). + * + * This driver has interesting history. The first version was written + * from the documentation and a 2.4 IDE driver posted on a Taiwan + * company, which didn't use any IDMA features and couldn't handle + * LBA48. The resulting driver couldn't handle LBA48 devices either + * making it pretty useless. + * + * After a while, initio picked the driver up, renamed it to + * sata_initio162x, updated it to use IDMA for ATA DMA commands and + * posted it on their website. It only used ATA_PROT_DMA for IDMA and + * attaching both devices and issuing IDMA and !IDMA commands + * simultaneously broke it due to PIRQ masking interaction but it did + * show how to use the IDMA (ADMA + some initio specific twists) + * engine. + * + * Then, I picked up their changes again and here's the usable driver + * which uses IDMA for everything. Everything works now including + * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some + * issues tho. Result Tf is not resported properly, NCQ isn't + * supported yet and CD/DVD writing works with DMA assisted PIO + * protocol (which, for native SATA devices, shouldn't cause any + * noticeable difference). + * + * Anyways, so, here's finally a working driver for inic162x. Enjoy! + * + * initio: If you guys wanna improve the driver regarding result TF + * access and other stuff, please feel free to contact me. I'll be + * happy to assist. + */ + +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <linux/blkdev.h> +#include <scsi/scsi_device.h> + +#define DRV_NAME "sata_inic162x" +#define DRV_VERSION "0.4" + +enum { + MMIO_BAR_PCI = 5, + MMIO_BAR_CARDBUS = 1, + + NR_PORTS = 2, + + IDMA_CPB_TBL_SIZE = 4 * 32, + + INIC_DMA_BOUNDARY = 0xffffff, + + HOST_ACTRL = 0x08, + HOST_CTL = 0x7c, + HOST_STAT = 0x7e, + HOST_IRQ_STAT = 0xbc, + HOST_IRQ_MASK = 0xbe, + + PORT_SIZE = 0x40, + + /* registers for ATA TF operation */ + PORT_TF_DATA = 0x00, + PORT_TF_FEATURE = 0x01, + PORT_TF_NSECT = 0x02, + PORT_TF_LBAL = 0x03, + PORT_TF_LBAM = 0x04, + PORT_TF_LBAH = 0x05, + PORT_TF_DEVICE = 0x06, + PORT_TF_COMMAND = 0x07, + PORT_TF_ALT_STAT = 0x08, + PORT_IRQ_STAT = 0x09, + PORT_IRQ_MASK = 0x0a, + PORT_PRD_CTL = 0x0b, + PORT_PRD_ADDR = 0x0c, + PORT_PRD_XFERLEN = 0x10, + PORT_CPB_CPBLAR = 0x18, + PORT_CPB_PTQFIFO = 0x1c, + + /* IDMA register */ + PORT_IDMA_CTL = 0x14, + PORT_IDMA_STAT = 0x16, + + PORT_RPQ_FIFO = 0x1e, + PORT_RPQ_CNT = 0x1f, + + PORT_SCR = 0x20, + + /* HOST_CTL bits */ + HCTL_LEDEN = (1 << 3), /* enable LED operation */ + HCTL_IRQOFF = (1 << 8), /* global IRQ off */ + HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */ + HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/ + HCTL_PWRDWN = (1 << 12), /* power down PHYs */ + HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ + HCTL_RPGSEL = (1 << 15), /* register page select */ + + HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST | + HCTL_RPGSEL, + + /* HOST_IRQ_(STAT|MASK) bits */ + HIRQ_PORT0 = (1 << 0), + HIRQ_PORT1 = (1 << 1), + HIRQ_SOFT = (1 << 14), + HIRQ_GLOBAL = (1 << 15), /* STAT only */ + + /* PORT_IRQ_(STAT|MASK) bits */ + 
PIRQ_OFFLINE = (1 << 0), /* device unplugged */
+	PIRQ_ONLINE = (1 << 1), /* device plugged */
+	PIRQ_COMPLETE = (1 << 2), /* completion interrupt */
+	PIRQ_FATAL = (1 << 3), /* fatal error */
+	PIRQ_ATA = (1 << 4), /* ATA interrupt */
+	PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */
+	PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
+
+	PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
+	PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA,
+	PIRQ_MASK_FREEZE = 0xff,
+
+	/* PORT_PRD_CTL bits */
+	PRD_CTL_START = (1 << 0),
+	PRD_CTL_WR = (1 << 3),
+	PRD_CTL_DMAEN = (1 << 7), /* DMA enable */
+
+	/* PORT_IDMA_CTL bits */
+	IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */
+	IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */
+	IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
+	IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
+
+	/* PORT_IDMA_STAT bits */
+	IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */
+	IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */
+	IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */
+	IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */
+	IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */
+	IDMA_STAT_PSD = (1 << 6), /* ADMA pause */
+	IDMA_STAT_DONE = (1 << 7), /* ADMA done */
+
+	IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+	CPB_CTL_VALID = (1 << 0), /* CPB valid */
+	CPB_CTL_QUEUED = (1 << 1), /* queued command */
+	CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */
+	CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */
+	CPB_CTL_DEVDIR = (1 << 4), /* device direction control */
+
+	/* CPB Response Flags */
+	CPB_RESP_DONE = (1 << 0), /* ATA command complete */
+	CPB_RESP_REL = (1 << 1), /* ATA release */
+	CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */
+	CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */
+	CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */
+	CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW = (1 << 6), /* APRD excess length error */
+	CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */
+
+	/* PRD Control Flags */
+	PRD_DRAIN = (1 << 1), /* ignore data excess */
+	PRD_CDB = (1 << 2), /* atapi packet command pointer */
+	PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */
+	PRD_DMA = (1 << 4), /* data transfer method */
+	PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */
+	PRD_IOM = (1 << 6), /* io/memory transfer */
+	PRD_END = (1 << 7), /* APRD chain end */
+};
+
+/* Command Parameter Block */
+struct inic_cpb {
+	u8 resp_flags; /* Response Flags */
+	u8 error; /* ATA Error */
+	u8 status; /* ATA Status */
+	u8 ctl_flags; /* Control Flags */
+	__le32 len; /* Total Transfer Length */
+	__le32 prd; /* First PRD pointer */
+	u8 rsvd[4];
+	/* 16 bytes */
+	u8 feature; /* ATA Feature */
+	u8 hob_feature; /* ATA Ex. Feature */
+	u8 device; /* ATA Device/Head */
+	u8 mirctl; /* Mirror Control */
+	u8 nsect; /* ATA Sector Count */
+	u8 hob_nsect; /* ATA Ex. Sector Count */
+	u8 lbal; /* ATA Sector Number */
+	u8 hob_lbal; /* ATA Ex. Sector Number */
+	u8 lbam; /* ATA Cylinder Low */
+	u8 hob_lbam; /* ATA Ex. Cylinder Low */
+	u8 lbah; /* ATA Cylinder High */
+	u8 hob_lbah; /* ATA Ex. 
Cylinder High */ + u8 command; /* ATA Command */ + u8 ctl; /* ATA Control */ + u8 slave_error; /* Slave ATA Error */ + u8 slave_status; /* Slave ATA Status */ + /* 32 bytes */ +} __packed; + +/* Physical Region Descriptor */ +struct inic_prd { + __le32 mad; /* Physical Memory Address */ + __le16 len; /* Transfer Length */ + u8 rsvd; + u8 flags; /* Control Flags */ +} __packed; + +struct inic_pkt { + struct inic_cpb cpb; + struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */ + u8 cdb[ATAPI_CDB_LEN]; +} __packed; + +struct inic_host_priv { + void __iomem *mmio_base; + u16 cached_hctl; +}; + +struct inic_port_priv { + struct inic_pkt *pkt; + dma_addr_t pkt_dma; + u32 *cpb_tbl; + dma_addr_t cpb_tbl_dma; +}; + +static struct scsi_host_template inic_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ + .dma_boundary = INIC_DMA_BOUNDARY, +}; + +static const int scr_map[] = { + [SCR_STATUS] = 0, + [SCR_ERROR] = 1, + [SCR_CONTROL] = 2, +}; + +static void __iomem *inic_port_base(struct ata_port *ap) +{ + struct inic_host_priv *hpriv = ap->host->private_data; + + return hpriv->mmio_base + ap->port_no * PORT_SIZE; +} + +static void inic_reset_port(void __iomem *port_base) +{ + void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; + + /* stop IDMA engine */ + readw(idma_ctl); /* flush */ + msleep(1); + + /* mask IRQ and assert reset */ + writew(IDMA_CTL_RST_IDMA, idma_ctl); + readw(idma_ctl); /* flush */ + msleep(1); + + /* release reset */ + writew(0, idma_ctl); + + /* clear irq */ + writeb(0xff, port_base + PORT_IRQ_STAT); +} + +static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val) +{ + void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; + + if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) + return -EINVAL; + + *val = readl(scr_addr + scr_map[sc_reg] * 4); + + /* this controller has stuck DIAG.N, ignore it */ + if (sc_reg == SCR_ERROR) + *val &= ~SERR_PHYRDY_CHG; + return 0; +} + +static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val) +{ + void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; + + if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) + return -EINVAL; + + writel(val, scr_addr + scr_map[sc_reg] * 4); + return 0; +} + +static void inic_stop_idma(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + + readb(port_base + PORT_RPQ_FIFO); + readb(port_base + PORT_RPQ_CNT); + writew(0, port_base + PORT_IDMA_CTL); +} + +static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + struct inic_port_priv *pp = ap->private_data; + struct inic_cpb *cpb = &pp->pkt->cpb; + bool freeze = false; + + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x", + irq_stat, idma_stat); + + inic_stop_idma(ap); + + if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { + ata_ehi_push_desc(ehi, "hotplug"); + ata_ehi_hotplugged(ehi); + freeze = true; + } + + if (idma_stat & IDMA_STAT_PERR) { + ata_ehi_push_desc(ehi, "PCI error"); + freeze = true; + } + + if (idma_stat & IDMA_STAT_CPBERR) { + ata_ehi_push_desc(ehi, "CPB error"); + + if (cpb->resp_flags & CPB_RESP_IGNORED) { + __ata_ehi_push_desc(ehi, " ignored"); + ehi->err_mask |= AC_ERR_INVALID; + freeze = true; + } + + if (cpb->resp_flags & CPB_RESP_ATA_ERR) + ehi->err_mask |= AC_ERR_DEV; + + if (cpb->resp_flags & CPB_RESP_SPURIOUS) { + __ata_ehi_push_desc(ehi, " spurious-intr"); + ehi->err_mask |= AC_ERR_HSM; + freeze = true; + } + + if (cpb->resp_flags & + 
(CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) { + __ata_ehi_push_desc(ehi, " data-over/underflow"); + ehi->err_mask |= AC_ERR_HSM; + freeze = true; + } + } + + if (freeze) + ata_port_freeze(ap); + else + ata_port_abort(ap); +} + +static void inic_host_intr(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); + u8 irq_stat; + u16 idma_stat; + + /* read and clear IRQ status */ + irq_stat = readb(port_base + PORT_IRQ_STAT); + writeb(irq_stat, port_base + PORT_IRQ_STAT); + idma_stat = readw(port_base + PORT_IDMA_STAT); + + if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR))) + inic_host_err_intr(ap, irq_stat, idma_stat); + + if (unlikely(!qc)) + goto spurious; + + if (likely(idma_stat & IDMA_STAT_DONE)) { + inic_stop_idma(ap); + + /* Depending on circumstances, device error + * isn't reported by IDMA, check it explicitly. + */ + if (unlikely(readb(port_base + PORT_TF_COMMAND) & + (ATA_DF | ATA_ERR))) + qc->err_mask |= AC_ERR_DEV; + + ata_qc_complete(qc); + return; + } + + spurious: + ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n", + qc ? qc->tf.command : 0xff, irq_stat, idma_stat); +} + +static irqreturn_t inic_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct inic_host_priv *hpriv = host->private_data; + u16 host_irq_stat; + int i, handled = 0; + + host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); + + if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) + goto out; + + spin_lock(&host->lock); + + for (i = 0; i < NR_PORTS; i++) + if (host_irq_stat & (HIRQ_PORT0 << i)) { + inic_host_intr(host->ports[i]); + handled++; + } + + spin_unlock(&host->lock); + + out: + return IRQ_RETVAL(handled); +} + +static int inic_check_atapi_dma(struct ata_queued_cmd *qc) +{ + /* For some reason ATAPI_PROT_DMA doesn't work for some + * commands including writes and other misc ops. Use PIO + * protocol instead, which BTW is driven by the DMA engine + * anyway, so it shouldn't make much difference for native + * SATA devices. 
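+	 * (A nonzero return from check_atapi_dma tells libata to avoid DMA
+	 * and fall back to PIO for that command.)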
+ */ + if (atapi_cmd_type(qc->cdb[0]) == READ) + return 0; + return 1; +} + +static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) +{ + struct scatterlist *sg; + unsigned int si; + u8 flags = 0; + + if (qc->tf.flags & ATA_TFLAG_WRITE) + flags |= PRD_WRITE; + + if (ata_is_dma(qc->tf.protocol)) + flags |= PRD_DMA; + + for_each_sg(qc->sg, sg, qc->n_elem, si) { + prd->mad = cpu_to_le32(sg_dma_address(sg)); + prd->len = cpu_to_le16(sg_dma_len(sg)); + prd->flags = flags; + prd++; + } + + WARN_ON(!si); + prd[-1].flags |= PRD_END; +} + +static void inic_qc_prep(struct ata_queued_cmd *qc) +{ + struct inic_port_priv *pp = qc->ap->private_data; + struct inic_pkt *pkt = pp->pkt; + struct inic_cpb *cpb = &pkt->cpb; + struct inic_prd *prd = pkt->prd; + bool is_atapi = ata_is_atapi(qc->tf.protocol); + bool is_data = ata_is_data(qc->tf.protocol); + unsigned int cdb_len = 0; + + VPRINTK("ENTER\n"); + + if (is_atapi) + cdb_len = qc->dev->cdb_len; + + /* prepare packet, based on initio driver */ + memset(pkt, 0, sizeof(struct inic_pkt)); + + cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN; + if (is_atapi || is_data) + cpb->ctl_flags |= CPB_CTL_DATA; + + cpb->len = cpu_to_le32(qc->nbytes + cdb_len); + cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd)); + + cpb->device = qc->tf.device; + cpb->feature = qc->tf.feature; + cpb->nsect = qc->tf.nsect; + cpb->lbal = qc->tf.lbal; + cpb->lbam = qc->tf.lbam; + cpb->lbah = qc->tf.lbah; + + if (qc->tf.flags & ATA_TFLAG_LBA48) { + cpb->hob_feature = qc->tf.hob_feature; + cpb->hob_nsect = qc->tf.hob_nsect; + cpb->hob_lbal = qc->tf.hob_lbal; + cpb->hob_lbam = qc->tf.hob_lbam; + cpb->hob_lbah = qc->tf.hob_lbah; + } + + cpb->command = qc->tf.command; + /* don't load ctl - dunno why. it's like that in the initio driver */ + + /* setup PRD for CDB */ + if (is_atapi) { + memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN); + prd->mad = cpu_to_le32(pp->pkt_dma + + offsetof(struct inic_pkt, cdb)); + prd->len = cpu_to_le16(cdb_len); + prd->flags = PRD_CDB | PRD_WRITE; + if (!is_data) + prd->flags |= PRD_END; + prd++; + } + + /* setup sg table */ + if (is_data) + inic_fill_sg(prd, qc); + + pp->cpb_tbl[0] = pp->pkt_dma; +} + +static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *port_base = inic_port_base(ap); + + /* fire up the ADMA engine */ + writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL); + writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL); + writeb(0, port_base + PORT_CPB_PTQFIFO); + + return 0; +} + +static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + void __iomem *port_base = inic_port_base(ap); + + tf->feature = readb(port_base + PORT_TF_FEATURE); + tf->nsect = readb(port_base + PORT_TF_NSECT); + tf->lbal = readb(port_base + PORT_TF_LBAL); + tf->lbam = readb(port_base + PORT_TF_LBAM); + tf->lbah = readb(port_base + PORT_TF_LBAH); + tf->device = readb(port_base + PORT_TF_DEVICE); + tf->command = readb(port_base + PORT_TF_COMMAND); +} + +static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *rtf = &qc->result_tf; + struct ata_taskfile tf; + + /* FIXME: Except for status and error, result TF access + * doesn't work. I tried reading from BAR0/2, CPB and BAR5. + * None works regardless of which command interface is used. + * For now return true iff status indicates device error. + * This means that we're reporting bogus sector for RW + * failures. Eeekk.... 
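+	 * (Hence only command and feature are copied into result_tf below;
+	 * the remaining fields are simply left untouched.)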
+ */ + inic_tf_read(qc->ap, &tf); + + if (!(tf.command & ATA_ERR)) + return false; + + rtf->command = tf.command; + rtf->feature = tf.feature; + return true; +} + +static void inic_freeze(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + + writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK); + writeb(0xff, port_base + PORT_IRQ_STAT); +} + +static void inic_thaw(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + + writeb(0xff, port_base + PORT_IRQ_STAT); + writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK); +} + +static int inic_check_ready(struct ata_link *link) +{ + void __iomem *port_base = inic_port_base(link->ap); + + return ata_check_ready(readb(port_base + PORT_TF_COMMAND)); +} + +/* + * SRST and SControl hardreset don't give valid signature on this + * controller. Only controller specific hardreset mechanism works. + */ +static int inic_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + void __iomem *port_base = inic_port_base(ap); + void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + int rc; + + /* hammer it into sane state */ + inic_reset_port(port_base); + + writew(IDMA_CTL_RST_ATA, idma_ctl); + readw(idma_ctl); /* flush */ + ata_msleep(ap, 1); + writew(0, idma_ctl); + + rc = sata_link_resume(link, timing, deadline); + if (rc) { + ata_link_warn(link, + "failed to resume link after reset (errno=%d)\n", + rc); + return rc; + } + + *class = ATA_DEV_NONE; + if (ata_link_online(link)) { + struct ata_taskfile tf; + + /* wait for link to become ready */ + rc = ata_wait_after_reset(link, deadline, inic_check_ready); + /* link occupied, -ENODEV too is an error */ + if (rc) { + ata_link_warn(link, + "device not ready after hardreset (errno=%d)\n", + rc); + return rc; + } + + inic_tf_read(ap, &tf); + *class = ata_dev_classify(&tf); + } + + return 0; +} + +static void inic_error_handler(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + + inic_reset_port(port_base); + ata_std_error_handler(ap); +} + +static void inic_post_internal_cmd(struct ata_queued_cmd *qc) +{ + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + inic_reset_port(inic_port_base(qc->ap)); +} + +static void init_port(struct ata_port *ap) +{ + void __iomem *port_base = inic_port_base(ap); + struct inic_port_priv *pp = ap->private_data; + + /* clear packet and CPB table */ + memset(pp->pkt, 0, sizeof(struct inic_pkt)); + memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); + + /* setup CPB lookup table addresses */ + writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); +} + +static int inic_port_resume(struct ata_port *ap) +{ + init_port(ap); + return 0; +} + +static int inic_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host->dev; + struct inic_port_priv *pp; + + /* alloc and initialize private data */ + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + ap->private_data = pp; + + /* Alloc resources */ + pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt), + &pp->pkt_dma, GFP_KERNEL); + if (!pp->pkt) + return -ENOMEM; + + pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE, + &pp->cpb_tbl_dma, GFP_KERNEL); + if (!pp->cpb_tbl) + return -ENOMEM; + + init_port(ap); + + return 0; +} + +static struct ata_port_operations inic_port_ops = { + .inherits = &sata_port_ops, + + .check_atapi_dma = inic_check_atapi_dma, + .qc_prep = 
inic_qc_prep, + .qc_issue = inic_qc_issue, + .qc_fill_rtf = inic_qc_fill_rtf, + + .freeze = inic_freeze, + .thaw = inic_thaw, + .hardreset = inic_hardreset, + .error_handler = inic_error_handler, + .post_internal_cmd = inic_post_internal_cmd, + + .scr_read = inic_scr_read, + .scr_write = inic_scr_write, + + .port_resume = inic_port_resume, + .port_start = inic_port_start, +}; + +static struct ata_port_info inic_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &inic_port_ops +}; + +static int init_controller(void __iomem *mmio_base, u16 hctl) +{ + int i; + u16 val; + + hctl &= ~HCTL_KNOWN_BITS; + + /* Soft reset whole controller. Spec says reset duration is 3 + * PCI clocks, be generous and give it 10ms. + */ + writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL); + readw(mmio_base + HOST_CTL); /* flush */ + + for (i = 0; i < 10; i++) { + msleep(1); + val = readw(mmio_base + HOST_CTL); + if (!(val & HCTL_SOFTRST)) + break; + } + + if (val & HCTL_SOFTRST) + return -EIO; + + /* mask all interrupts and reset ports */ + for (i = 0; i < NR_PORTS; i++) { + void __iomem *port_base = mmio_base + i * PORT_SIZE; + + writeb(0xff, port_base + PORT_IRQ_MASK); + inic_reset_port(port_base); + } + + /* port IRQ is masked now, unmask global IRQ */ + writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL); + val = readw(mmio_base + HOST_IRQ_MASK); + val &= ~(HIRQ_PORT0 | HIRQ_PORT1); + writew(val, mmio_base + HOST_IRQ_MASK); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int inic_pci_device_resume(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + struct inic_host_priv *hpriv = host->private_data; + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; + + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { + rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); + if (rc) + return rc; + } + + ata_host_resume(host); + + return 0; +} +#endif + +static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct ata_port_info *ppi[] = { &inic_port_info, NULL }; + struct ata_host *host; + struct inic_host_priv *hpriv; + void __iomem * const *iomap; + int mmio_bar; + int i, rc; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n"); + + /* alloc host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS); + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + if (!host || !hpriv) + return -ENOMEM; + + host->private_data = hpriv; + + /* Acquire resources and fill host. Note that PCI and cardbus + * use different BARs. + */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM) + mmio_bar = MMIO_BAR_PCI; + else + mmio_bar = MMIO_BAR_CARDBUS; + + rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME); + if (rc) + return rc; + host->iomap = iomap = pcim_iomap_table(pdev); + hpriv->mmio_base = iomap[mmio_bar]; + hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL); + + for (i = 0; i < NR_PORTS; i++) { + struct ata_port *ap = host->ports[i]; + + ata_port_pbar_desc(ap, mmio_bar, -1, "mmio"); + ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port"); + } + + /* Set dma_mask. This devices doesn't support 64bit addressing. 
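+ * A 32-bit mask tells the DMA API that the device can only address
+ * the low 4GB; buffers above that are handled for us (IOMMU or
+ * bounce buffering) rather than failing outright.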
*/ + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); + return rc; + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); + return rc; + } + + /* + * This controller is braindamaged. dma_boundary is 0xffff + * like others but it will lock up the whole machine HARD if + * 65536 byte PRD entry is fed. Reduce maximum segment size. + */ + rc = pci_set_dma_max_seg_size(pdev, 65536 - 512); + if (rc) { + dev_err(&pdev->dev, "failed to set the maximum segment size\n"); + return rc; + } + + rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); + if (rc) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + return rc; + } + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED, + &inic_sht); +} + +static const struct pci_device_id inic_pci_tbl[] = { + { PCI_VDEVICE(INIT, 0x1622), }, + { }, +}; + +static struct pci_driver inic_pci_driver = { + .name = DRV_NAME, + .id_table = inic_pci_tbl, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = inic_pci_device_resume, +#endif + .probe = inic_init_one, + .remove = ata_pci_remove_one, +}; + +module_pci_driver(inic_pci_driver); + +MODULE_AUTHOR("Tejun Heo"); +MODULE_DESCRIPTION("low-level driver for Initio 162x SATA"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, inic_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index c01496df4a9..391cfda1e83 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1,9 +1,13 @@ /* * sata_mv.c - Marvell SATA support * + * Copyright 2008-2009: Marvell Corporation, all rights reserved. * Copyright 2005: EMC Corporation, all rights reserved. * Copyright 2005 Red Hat, Inc. All rights reserved. * + * Originally written by Brett Russ. + * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. + * * Please ALWAYS copy linux-ide@vger.kernel.org on emails. * * This program is free software; you can redistribute it and/or modify @@ -21,6 +25,30 @@ * */ +/* + * sata_mv TODO list: + * + * --> Develop a low-power-consumption strategy, and implement it. + * + * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. + * + * --> [Experiment, Marvell value added] Is it possible to use target + * mode to cross-connect two Linux boxes with Marvell cards? If so, + * creating LibATA target mode support would be very interesting. + * + * Target mode, for those without docs, is the ability to directly + * connect two SATA ports. + */ + +/* + * 80x1-B2 errata PCI#11: + * + * Users of the 6041/6081 Rev.B2 chips (current is C0) + * should be careful to insert those cards only onto PCI-X bus #0, + * and only in device slots 0..7, not higher. The chips may not + * work correctly otherwise (note: this is a pretty rare condition). 
+ */ + #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -28,16 +56,45 @@ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/device.h> +#include <linux/clk.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> +#include <linux/mbus.h> +#include <linux/bitops.h> +#include <linux/gfp.h> +#include <linux/of.h> +#include <linux/of_irq.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> #include <linux/libata.h> -#include <asm/io.h> #define DRV_NAME "sata_mv" -#define DRV_VERSION "0.7" +#define DRV_VERSION "1.28" + +/* + * module options + */ + +#ifdef CONFIG_PCI +static int msi; +module_param(msi, int, S_IRUGO); +MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); +#endif + +static int irq_coalescing_io_count; +module_param(irq_coalescing_io_count, int, S_IRUGO); +MODULE_PARM_DESC(irq_coalescing_io_count, + "IRQ coalescing I/O count threshold (0..255)"); + +static int irq_coalescing_usecs; +module_param(irq_coalescing_usecs, int, S_IRUGO); +MODULE_PARM_DESC(irq_coalescing_usecs, + "IRQ coalescing time threshold in usecs"); enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ @@ -48,74 +105,102 @@ enum { MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ + /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */ + COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ + MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ + MAX_COAL_IO_COUNT = 255, /* completed I/O count */ + MV_PCI_REG_BASE = 0, - MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ - MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), - MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), - MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), - MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), - MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), - - MV_SATAHC0_REG_BASE = 0x20000, - MV_FLASH_CTL = 0x1046c, - MV_GPIO_PORT_CTL = 0x104f0, - MV_RESET_CFG = 0x180d8, + + /* + * Per-chip ("all ports") interrupt coalescing feature. + * This is only for GEN_II / GEN_IIE hardware. + * + * Coalescing defers the interrupt until either the IO_THRESHOLD + * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. + */ + COAL_REG_BASE = 0x18000, + IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), + ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ + + IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), + IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), + + /* + * Registers for the (unused here) transaction coalescing feature: + */ + TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), + TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), + + SATAHC0_REG_BASE = 0x20000, + FLASH_CTL = 0x1046c, + GPIO_PORT_CTL = 0x104f0, + RESET_CFG = 0x180d8, MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, - MV_USE_Q_DEPTH = ATA_DEF_QUEUE, - MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, /* CRQB needs alignment on a 1KB boundary. Size == 1KB * CRPB needs alignment on a 256B boundary. Size == 256B - * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB * ePRD (SG) entries need alignment on a 16B boundary. 
Size == 16B */ MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), - MV_MAX_SG_CT = 176, + MV_MAX_SG_CT = 256, MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), - MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), - MV_PORTS_PER_HC = 4, - /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ + /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ MV_PORT_HC_SHIFT = 2, - /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */ - MV_PORT_MASK = 3, + MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ + /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ + MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ - MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ - MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING), - MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, + + MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, + + MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, + + MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | + ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, + + MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, CRQB_FLAG_READ = (1 << 0), CRQB_TAG_SHIFT = 1, + CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ + CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ + CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ CRQB_CMD_ADDR_SHIFT = 8, CRQB_CMD_CS = (0x2 << 11), CRQB_CMD_LAST = (1 << 15), CRPB_FLAG_STATUS_SHIFT = 8, + CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ + CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ EPRD_FLAG_END_OF_TBL = (1 << 31), /* PCI interface registers */ - PCI_COMMAND_OFS = 0xc00, + MV_PCI_COMMAND = 0xc00, + MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ + MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ - PCI_MAIN_CMD_STS_OFS = 0xd30, + PCI_MAIN_CMD_STS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), GLOB_SFT_RST = (1 << 4), MV_PCI_MODE = 0xd00, + MV_PCI_MODE_MASK = 0x30, + MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, MV_PCI_DISC_TIMER = 0xd04, MV_PCI_MSI_TRIGGER = 0xc38, @@ -126,137 +211,259 @@ enum { MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, - PCI_IRQ_CAUSE_OFS = 0x1d58, - PCI_IRQ_MASK_OFS = 0x1d5c, + PCI_IRQ_CAUSE = 0x1d58, + PCI_IRQ_MASK = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ - HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, - HC_MAIN_IRQ_MASK_OFS = 0x1d64, - PORT0_ERR = (1 << 0), /* shift by port # */ - PORT0_DONE = (1 << 1), /* shift by port # */ + PCIE_IRQ_CAUSE = 0x1900, + PCIE_IRQ_MASK = 0x1910, + PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ + + /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ + PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, + PCI_HC_MAIN_IRQ_MASK = 0x1d64, + SOC_HC_MAIN_IRQ_CAUSE = 0x20020, + SOC_HC_MAIN_IRQ_MASK = 0x20024, + ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ + DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ + DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ + DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ PCI_ERR = (1 << 18), - TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */ - TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */ - PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */ + TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ + 
TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ + PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ + PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ + ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ GPIO_INT = (1 << 22), SELF_INT = (1 << 23), TWSI_INT = (1 << 24), HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ - HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | - PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | - HC_MAIN_RSVD), + HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ + HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ /* SATAHC registers */ - HC_CFG_OFS = 0, + HC_CFG = 0x00, - HC_IRQ_CAUSE_OFS = 0x14, - CRPB_DMA_DONE = (1 << 0), /* shift by port # */ - HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ + HC_IRQ_CAUSE = 0x14, + DMA_IRQ = (1 << 0), /* shift by port # */ + HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ DEV_IRQ = (1 << 8), /* shift by port # */ + /* + * Per-HC (Host-Controller) interrupt coalescing feature. + * This is present on all chip generations. + * + * Coalescing defers the interrupt until either the IO_THRESHOLD + * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. + */ + HC_IRQ_COAL_IO_THRESHOLD = 0x000c, + HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, + + SOC_LED_CTRL = 0x2c, + SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ + SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ + /* with dev activity LED */ + /* Shadow block registers */ - SHD_BLK_OFS = 0x100, - SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ + SHD_BLK = 0x100, + SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ /* SATA registers */ - SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ - SATA_ACTIVE_OFS = 0x350, - PHY_MODE3 = 0x310, - PHY_MODE4 = 0x314, + SATA_STATUS = 0x300, /* ctrl, err regs follow status */ + SATA_ACTIVE = 0x350, + FIS_IRQ_CAUSE = 0x364, + FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ + + LTMODE = 0x30c, /* requires read-after-write */ + LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ + PHY_MODE2 = 0x330, + PHY_MODE3 = 0x310, + + PHY_MODE4 = 0x314, /* requires read-after-write */ + PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ + PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ + PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ + PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ + + SATA_IFCTL = 0x344, + SATA_TESTCTL = 0x348, + SATA_IFSTAT = 0x34c, + VENDOR_UNIQUE_FIS = 0x35c, + + FISCFG = 0x360, + FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ + FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ + + PHY_MODE9_GEN2 = 0x398, + PHY_MODE9_GEN1 = 0x39c, + PHYCFG_OFS = 0x3a0, /* only in 65n devices */ + MV5_PHY_MODE = 0x74, - MV5_LT_MODE = 0x30, + MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, - SATA_INTERFACE_CTL = 0x050, + SATA_IFCFG = 0x050, + LP_PHY_CTL = 0x058, MV_M2_PREAMP_MASK = 0x7e0, /* Port registers */ - EDMA_CFG_OFS = 0, - EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ - EDMA_CFG_NCQ = (1 << 5), - EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ - EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ - EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ - - EDMA_ERR_IRQ_CAUSE_OFS = 0x8, - EDMA_ERR_IRQ_MASK_OFS = 0xc, - EDMA_ERR_D_PAR = (1 << 0), - EDMA_ERR_PRD_PAR = (1 << 1), - EDMA_ERR_DEV = (1 << 2), - EDMA_ERR_DEV_DCON = (1 << 3), - EDMA_ERR_DEV_CON = (1 << 4), - EDMA_ERR_SERR = (1 << 5), - EDMA_ERR_SELF_DIS = (1 << 7), - EDMA_ERR_BIST_ASYNC = (1 << 8), - EDMA_ERR_CRBQ_PAR = 
(1 << 9), - EDMA_ERR_CRPB_PAR = (1 << 10), - EDMA_ERR_INTRL_PAR = (1 << 11), - EDMA_ERR_IORDY = (1 << 12), - EDMA_ERR_LNK_CTRL_RX = (0xf << 13), - EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), - EDMA_ERR_LNK_DATA_RX = (0xf << 17), - EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), - EDMA_ERR_LNK_DATA_TX = (0x1f << 26), - EDMA_ERR_TRANS_PROTO = (1 << 31), - EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | - EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR | - EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | - EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | - EDMA_ERR_LNK_DATA_RX | - EDMA_ERR_LNK_DATA_TX | - EDMA_ERR_TRANS_PROTO), - - EDMA_REQ_Q_BASE_HI_OFS = 0x10, - EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ - - EDMA_REQ_Q_OUT_PTR_OFS = 0x18, + EDMA_CFG = 0, + EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ + EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ + EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ + EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ + EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ + EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ + EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ + + EDMA_ERR_IRQ_CAUSE = 0x8, + EDMA_ERR_IRQ_MASK = 0xc, + EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ + EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ + EDMA_ERR_DEV = (1 << 2), /* device error */ + EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ + EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ + EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ + EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ + EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ + EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ + EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ + EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ + EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ + EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ + EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ + + EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ + EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ + EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ + EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ + EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ + + EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ + + EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ + EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ + EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ + EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ + EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ + EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ + + EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ + + EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ + EDMA_ERR_OVERRUN_5 = (1 << 5), + EDMA_ERR_UNDERRUN_5 = (1 << 6), + + EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | + EDMA_ERR_LNK_CTRL_RX_1 | + EDMA_ERR_LNK_CTRL_RX_3 | + EDMA_ERR_LNK_CTRL_TX, + + EDMA_EH_FREEZE = EDMA_ERR_D_PAR | + EDMA_ERR_PRD_PAR | + EDMA_ERR_DEV_DCON | + EDMA_ERR_DEV_CON | + EDMA_ERR_SERR | + EDMA_ERR_SELF_DIS | + EDMA_ERR_CRQB_PAR | + EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR | + EDMA_ERR_IORDY | + EDMA_ERR_LNK_CTRL_RX_2 | + EDMA_ERR_LNK_DATA_RX | + EDMA_ERR_LNK_DATA_TX | + EDMA_ERR_TRANS_PROTO, + + EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | + EDMA_ERR_PRD_PAR | + EDMA_ERR_DEV_DCON | + 
EDMA_ERR_DEV_CON | + EDMA_ERR_OVERRUN_5 | + EDMA_ERR_UNDERRUN_5 | + EDMA_ERR_SELF_DIS_5 | + EDMA_ERR_CRQB_PAR | + EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR | + EDMA_ERR_IORDY, + + EDMA_REQ_Q_BASE_HI = 0x10, + EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ + + EDMA_REQ_Q_OUT_PTR = 0x18, EDMA_REQ_Q_PTR_SHIFT = 5, - EDMA_RSP_Q_BASE_HI_OFS = 0x1c, - EDMA_RSP_Q_IN_PTR_OFS = 0x20, - EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ + EDMA_RSP_Q_BASE_HI = 0x1c, + EDMA_RSP_Q_IN_PTR = 0x20, + EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ EDMA_RSP_Q_PTR_SHIFT = 3, - EDMA_CMD_OFS = 0x28, - EDMA_EN = (1 << 0), - EDMA_DS = (1 << 1), - ATA_RST = (1 << 2), + EDMA_CMD = 0x28, /* EDMA command register */ + EDMA_EN = (1 << 0), /* enable EDMA */ + EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ + EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ + + EDMA_STATUS = 0x30, /* EDMA engine status */ + EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ + EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ EDMA_IORDY_TMOUT = 0x34, EDMA_ARB_CFG = 0x38, + EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ + EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ + + BMDMA_CMD = 0x224, /* bmdma command register */ + BMDMA_STATUS = 0x228, /* bmdma status register */ + BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ + BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ + /* Host private flags (hp_flags) */ MV_HP_FLAG_MSI = (1 << 0), MV_HP_ERRATA_50XXB0 = (1 << 1), MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), - MV_HP_ERRATA_XX42A0 = (1 << 5), - MV_HP_50XX = (1 << 6), - MV_HP_GEN_IIE = (1 << 7), + MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ + MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ + MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ + MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ + MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ + MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ + MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ + MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */ /* Port private flags (pp_flags) */ - MV_PP_FLAG_EDMA_EN = (1 << 0), - MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), + MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ + MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ + MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ + MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ + MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ }; -#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) -#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) -#define IS_GEN_I(hpriv) IS_50XX(hpriv) -#define IS_GEN_II(hpriv) IS_60XX(hpriv) +#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) +#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) +#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) +#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) + +#define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) +#define WINDOW_BASE(i) (0x20034 + ((i) << 4)) enum { - /* Our DMA boundary is determined by an ePRD being unable to handle - * anything larger than 64KB + /* DMA boundary 0xffff is required by the s/g splitting + * we need on /length/ in mv_fill-sg(). 
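+ * (The ePRD length field is only 16 bits wide, so no single s/g
+ * entry may describe more than 64KB; a 96KB segment, for example,
+ * must be emitted as 64KB + 32KB entries.)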
*/ MV_DMA_BOUNDARY = 0xffffU, + /* mask of register bits containing lower 32 bits + * of EDMA request queue DMA address + */ EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, + /* ditto, for response queue */ EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, }; @@ -268,6 +475,7 @@ enum chip_type { chip_608x, chip_6042, chip_7042, + chip_soc, }; /* Command ReQuest Block: 32B */ @@ -301,14 +509,32 @@ struct mv_sg { __le32 reserved; }; +/* + * We keep a local cache of a few frequently accessed port + * registers here, to avoid having to read them (very slow) + * when switching between EDMA and non-EDMA modes. + */ +struct mv_cached_regs { + u32 fiscfg; + u32 ltmode; + u32 haltcond; + u32 unknown_rsvd; +}; + struct mv_port_priv { struct mv_crqb *crqb; dma_addr_t crqb_dma; struct mv_crpb *crpb; dma_addr_t crpb_dma; - struct mv_sg *sg_tbl; - dma_addr_t sg_tbl_dma; + struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; + dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; + + unsigned int req_idx; + unsigned int resp_idx; + u32 pp_flags; + struct mv_cached_regs cached; + unsigned int delayed_eh_pmp_map; }; struct mv_port_signal { @@ -316,7 +542,45 @@ struct mv_port_signal { u32 pre; }; -struct mv_host_priv; +struct mv_host_priv { + u32 hp_flags; + unsigned int board_idx; + u32 main_irq_mask; + struct mv_port_signal signal[8]; + const struct mv_hw_ops *ops; + int n_ports; + void __iomem *base; + void __iomem *main_irq_cause_addr; + void __iomem *main_irq_mask_addr; + u32 irq_cause_offset; + u32 irq_mask_offset; + u32 unmask_all_irqs; + + /* + * Needed on some devices that require their clocks to be enabled. + * These are optional: if the platform device does not have any + * clocks, they won't be used. Also, if the underlying hardware + * does not support the common clock framework (CONFIG_HAVE_CLK=n), + * all the clock operations become no-ops (see clk.h). + */ + struct clk *clk; + struct clk **port_clks; + /* + * Some devices have a SATA PHY which can be enabled/disabled + * in order to save power. These are optional: if the platform + * devices does not have any phy, they won't be used. + */ + struct phy **port_phys; + /* + * These consistent DMA memory pools give us guaranteed + * alignment for hardware-accessed data structures, + * and less memory waste in accomplishing the alignment. 
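+ * (Illustration, not the literal probe code: a pool created with
+ * dma_pool_create(name, dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0) hands
+ * out blocks both sized and aligned to 1KB, which is exactly the
+ * CRQB queue alignment the hardware demands.)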
+ */ + struct dma_pool *crqb_pool; + struct dma_pool *crpb_pool; + struct dma_pool *sg_tbl_pool; +}; + struct mv_hw_ops { void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -326,32 +590,24 @@ struct mv_hw_ops { int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); - void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); -}; - -struct mv_host_priv { - u32 hp_flags; - struct mv_port_signal signal[8]; - const struct mv_hw_ops *ops; + void (*reset_bus)(struct ata_host *host, void __iomem *mmio); }; -static void mv_irq_clear(struct ata_port *ap); -static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); -static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); -static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); -static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); -static void mv_phy_reset(struct ata_port *ap); -static void __mv_phy_reset(struct ata_port *ap, int can_sleep); -static void mv_host_stop(struct ata_host *host); +static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); +static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); +static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); +static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); +static int mv_qc_defer(struct ata_queued_cmd *qc); static void mv_qc_prep(struct ata_queued_cmd *qc); static void mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); -static irqreturn_t mv_interrupt(int irq, void *dev_instance, - struct pt_regs *regs); -static void mv_eng_timeout(struct ata_port *ap); -static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int mv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void mv_eh_freeze(struct ata_port *ap); +static void mv_eh_thaw(struct ata_port *ap); +static void mv6_dev_config(struct ata_device *dev); static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -361,7 +617,7 @@ static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); -static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio); +static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -371,188 +627,199 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); -static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); -static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, +static void mv_soc_enable_leds(struct mv_host_priv *hpriv, + void __iomem *mmio); +static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, + void __iomem *mmio); +static int mv_soc_reset_hc(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int n_hc); +static void 
mv_soc_reset_flash(struct mv_host_priv *hpriv, + void __iomem *mmio); +static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); +static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int port); +static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); +static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); -static void mv_stop_and_reset(struct ata_port *ap); - -static struct scsi_host_template mv_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = MV_USE_Q_DEPTH, - .this_id = ATA_SHT_THIS_ID, +static int mv_stop_edma(struct ata_port *ap); +static int mv_stop_edma_engine(void __iomem *port_mmio); +static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); + +static void mv_pmp_select(struct ata_port *ap, int pmp); +static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int mv_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void mv_pmp_error_handler(struct ata_port *ap); +static void mv_process_crpb_entries(struct ata_port *ap, + struct mv_port_priv *pp); + +static void mv_sff_irq_clear(struct ata_port *ap); +static int mv_check_atapi_dma(struct ata_queued_cmd *qc); +static void mv_bmdma_setup(struct ata_queued_cmd *qc); +static void mv_bmdma_start(struct ata_queued_cmd *qc); +static void mv_bmdma_stop(struct ata_queued_cmd *qc); +static u8 mv_bmdma_status(struct ata_port *ap); +static u8 mv_sff_check_status(struct ata_port *ap); + +/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below + * because we have to allow room for worst case splitting of + * PRDs for 64K boundaries in mv_fill_sg(). 
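+ *
+ * Worked out: MV_MAX_SG_CT is 256 ePRDs, so .sg_tablesize is 128.
+ * Even if every one of those 128 segments straddles a 64K boundary
+ * and splits in two, the result still fits the table exactly.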
+ */ +#ifdef CONFIG_PCI +static struct scsi_host_template mv5_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = MV_MAX_SG_CT / 2, + .dma_boundary = MV_DMA_BOUNDARY, +}; +#endif +static struct scsi_host_template mv6_sht = { + ATA_NCQ_SHT(DRV_NAME), + .can_queue = MV_MAX_Q_DEPTH - 1, .sg_tablesize = MV_MAX_SG_CT / 2, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, .dma_boundary = MV_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations mv5_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, +static struct ata_port_operations mv5_ops = { + .inherits = &ata_sff_port_ops, - .phy_reset = mv_phy_reset, + .lost_interrupt = ATA_OP_NULL, + .qc_defer = mv_qc_defer, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, - .eng_timeout = mv_eng_timeout, - - .irq_handler = mv_interrupt, - .irq_clear = mv_irq_clear, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .hardreset = mv_hardreset, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, .port_start = mv_port_start, .port_stop = mv_port_stop, - .host_stop = mv_host_stop, }; -static const struct ata_port_operations mv6_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, +static struct ata_port_operations mv6_ops = { + .inherits = &ata_bmdma_port_ops, - .phy_reset = mv_phy_reset, + .lost_interrupt = ATA_OP_NULL, + .qc_defer = mv_qc_defer, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, - .eng_timeout = mv_eng_timeout, + .dev_config = mv6_dev_config, - .irq_handler = mv_interrupt, - .irq_clear = mv_irq_clear, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .hardreset = mv_hardreset, + .softreset = mv_softreset, + .pmp_hardreset = mv_pmp_hardreset, + .pmp_softreset = mv_softreset, + .error_handler = mv_pmp_error_handler, .scr_read = mv_scr_read, .scr_write = mv_scr_write, + .sff_check_status = mv_sff_check_status, + .sff_irq_clear = mv_sff_irq_clear, + .check_atapi_dma = mv_check_atapi_dma, + .bmdma_setup = mv_bmdma_setup, + .bmdma_start = mv_bmdma_start, + .bmdma_stop = mv_bmdma_stop, + .bmdma_status = mv_bmdma_status, + .port_start = mv_port_start, .port_stop = mv_port_stop, - .host_stop = mv_host_stop, }; -static const struct ata_port_operations mv_iie_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .phy_reset = mv_phy_reset, - +static struct ata_port_operations mv_iie_ops = { + .inherits = &mv6_ops, + .dev_config = ATA_OP_NULL, .qc_prep = mv_qc_prep_iie, - .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, - - .eng_timeout = mv_eng_timeout, - - .irq_handler = mv_interrupt, - .irq_clear = mv_irq_clear, - - .scr_read = mv_scr_read, - .scr_write = mv_scr_write, - - .port_start = mv_port_start, - .port_stop = mv_port_stop, - .host_stop = mv_host_stop, }; static const struct ata_port_info mv_port_info[] = { { /* chip_504x */ - .sht = &mv_sht, - .flags = MV_COMMON_FLAGS, - .pio_mask = 0x1f, /* pio0-4 */ 
- .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_I_FLAGS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_508x */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_5080 */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_604x */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_II_FLAGS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_608x */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | - MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_6042 */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_IIE_FLAGS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, { /* chip_7042 */ - .sht = &mv_sht, - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | - MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = MV_GEN_IIE_FLAGS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &mv_iie_ops, + }, + { /* chip_soc */ + .flags = MV_GEN_IIE_FLAGS, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, }; static const struct pci_device_id mv_pci_tbl[] = { - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x}, - - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, - - {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x}, - {} /* terminate list */ -}; - -static struct pci_driver mv_pci_driver = { - .name = DRV_NAME, - .id_table = mv_pci_tbl, - .probe = mv_init_one, - .remove = ata_pci_remove_one, + { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, + { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, + { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, + { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, + /* RocketRAID 1720/174x have different identifiers */ + { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, + { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, + { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, + + { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, + { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, + { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, + { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, + { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, + + { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, + + /* Adaptec 1430SA */ + { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, + + /* Marvell 7042 support */ + { PCI_VDEVICE(MARVELL, 
0x7042), chip_7042 }, + + /* Highpoint RocketRAID PCIe series */ + { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, + { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, + + { } /* terminate list */ }; static const struct mv_hw_ops mv5xxx_ops = { @@ -573,11 +840,22 @@ static const struct mv_hw_ops mv6xxx_ops = { .reset_bus = mv_reset_pci_bus, }; -/* - * module options - */ -static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ +static const struct mv_hw_ops mv_soc_ops = { + .phy_errata = mv6_phy_errata, + .enable_leds = mv_soc_enable_leds, + .read_preamp = mv_soc_read_preamp, + .reset_hc = mv_soc_reset_hc, + .reset_flash = mv_soc_reset_flash, + .reset_bus = mv_soc_reset_bus, +}; +static const struct mv_hw_ops mv_soc_65n_ops = { + .phy_errata = mv_soc_65n_phy_errata, + .enable_leds = mv_soc_enable_leds, + .reset_hc = mv_soc_reset_hc, + .reset_flash = mv_soc_reset_flash, + .reset_bus = mv_soc_reset_bus, +}; /* * Functions @@ -589,11 +867,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr) (void) readl(addr); /* flush to avoid PCI posted write */ } -static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) -{ - return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); -} - static inline unsigned int mv_hc_from_port(unsigned int port) { return port >> MV_PORT_HC_SHIFT; @@ -604,6 +877,29 @@ static inline unsigned int mv_hardport_from_port(unsigned int port) return port & MV_PORT_MASK; } +/* + * Consolidate some rather tricky bit shift calculations. + * This is hot-path stuff, so not a function. + * Simple code, with two return values, so macro rather than inline. + * + * port is the sole input, in range 0..7. + * shift is one output, for use with main_irq_cause / main_irq_mask registers. + * hardport is the other output, in range 0..3. + * + * Note that port and hardport may be the same variable in some cases. + */ +#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ +{ \ + shift = mv_hc_from_port(port) * HC_SHIFT; \ + hardport = mv_hardport_from_port(port); \ + shift += hardport * 2; \ +} + +static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) +{ + return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); +} + static inline void __iomem *mv_hc_base_from_port(void __iomem *base, unsigned int port) { @@ -617,9 +913,23 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) (mv_hardport_from_port(port) * MV_PORT_REG_SZ); } +static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) +{ + void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); + unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; + + return hc_mmio + ofs; +} + +static inline void __iomem *mv_host_base(struct ata_host *host) +{ + struct mv_host_priv *hpriv = host->private_data; + return hpriv->base; +} + static inline void __iomem *mv_ap_base(struct ata_port *ap) { - return mv_port_base(ap->host->mmio_base, ap->port_no); + return mv_port_base(mv_host_base(ap->host), ap->port_no); } static inline int mv_get_hc_count(unsigned long port_flags) @@ -627,12 +937,225 @@ static inline int mv_get_hc_count(unsigned long port_flags) return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); } -static void mv_irq_clear(struct ata_port *ap) +/** + * mv_save_cached_regs - (re-)initialize cached port registers + * @ap: the port whose registers we are caching + * + * Initialize the local cache of port registers, + * so that reading them over and over again can + * be avoided on the hotter paths of this driver. 
+ * This saves a few microseconds each time we switch + * to/from EDMA mode to perform (eg.) a drive cache flush. + */ +static void mv_save_cached_regs(struct ata_port *ap) { + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + + pp->cached.fiscfg = readl(port_mmio + FISCFG); + pp->cached.ltmode = readl(port_mmio + LTMODE); + pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); + pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); } /** - * mv_start_dma - Enable eDMA engine + * mv_write_cached_reg - write to a cached port register + * @addr: hardware address of the register + * @old: pointer to cached value of the register + * @new: new value for the register + * + * Write a new value to a cached register, + * but only if the value is different from before. + */ +static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) +{ + if (new != *old) { + unsigned long laddr; + *old = new; + /* + * Workaround for 88SX60x1-B2 FEr SATA#13: + * Read-after-write is needed to prevent generating 64-bit + * write cycles on the PCI bus for SATA interface registers + * at offsets ending in 0x4 or 0xc. + * + * Looks like a lot of fuss, but it avoids an unnecessary + * +1 usec read-after-write delay for unaffected registers. + */ + laddr = (long)addr & 0xffff; + if (laddr >= 0x300 && laddr <= 0x33c) { + laddr &= 0x000f; + if (laddr == 0x4 || laddr == 0xc) { + writelfl(new, addr); /* read after write */ + return; + } + } + writel(new, addr); /* unaffected by the errata */ + } +} + +static void mv_set_edma_ptrs(void __iomem *port_mmio, + struct mv_host_priv *hpriv, + struct mv_port_priv *pp) +{ + u32 index; + + /* + * initialize request queue + */ + pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ + index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; + + WARN_ON(pp->crqb_dma & 0x3ff); + writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); + writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, + port_mmio + EDMA_REQ_Q_IN_PTR); + writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR); + + /* + * initialize response queue + */ + pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ + index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; + + WARN_ON(pp->crpb_dma & 0xff); + writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); + writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR); + writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, + port_mmio + EDMA_RSP_Q_OUT_PTR); +} + +static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) +{ + /* + * When writing to the main_irq_mask in hardware, + * we must ensure exclusivity between the interrupt coalescing bits + * and the corresponding individual port DONE_IRQ bits. + * + * Note that this register is really an "IRQ enable" register, + * not an "IRQ mask" register as Marvell's naming might suggest. 
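+ *
+ * Example: with ALL_PORTS_COAL_DONE enabled, the per-port DONE_IRQ
+ * bits for ports 0..7 are forced off below, so each completion is
+ * signalled exactly once, via the coalescing interrupt.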
+ */ + if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) + mask &= ~DONE_IRQ_0_3; + if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) + mask &= ~DONE_IRQ_4_7; + writelfl(mask, hpriv->main_irq_mask_addr); +} + +static void mv_set_main_irq_mask(struct ata_host *host, + u32 disable_bits, u32 enable_bits) +{ + struct mv_host_priv *hpriv = host->private_data; + u32 old_mask, new_mask; + + old_mask = hpriv->main_irq_mask; + new_mask = (old_mask & ~disable_bits) | enable_bits; + if (new_mask != old_mask) { + hpriv->main_irq_mask = new_mask; + mv_write_main_irq_mask(new_mask, hpriv); + } +} + +static void mv_enable_port_irqs(struct ata_port *ap, + unsigned int port_bits) +{ + unsigned int shift, hardport, port = ap->port_no; + u32 disable_bits, enable_bits; + + MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); + + disable_bits = (DONE_IRQ | ERR_IRQ) << shift; + enable_bits = port_bits << shift; + mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); +} + +static void mv_clear_and_enable_port_irqs(struct ata_port *ap, + void __iomem *port_mmio, + unsigned int port_irqs) +{ + struct mv_host_priv *hpriv = ap->host->private_data; + int hardport = mv_hardport_from_port(ap->port_no); + void __iomem *hc_mmio = mv_hc_base_from_port( + mv_host_base(ap->host), ap->port_no); + u32 hc_irq_cause; + + /* clear EDMA event indicators, if any */ + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); + + /* clear pending irq events */ + hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); + writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); + + /* clear FIS IRQ Cause */ + if (IS_GEN_IIE(hpriv)) + writelfl(0, port_mmio + FIS_IRQ_CAUSE); + + mv_enable_port_irqs(ap, port_irqs); +} + +static void mv_set_irq_coalescing(struct ata_host *host, + unsigned int count, unsigned int usecs) +{ + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base, *hc_mmio; + u32 coal_enable = 0; + unsigned long flags; + unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; + const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | + ALL_PORTS_COAL_DONE; + + /* Disable IRQ coalescing if either threshold is zero */ + if (!usecs || !count) { + clks = count = 0; + } else { + /* Respect maximum limits of the hardware */ + clks = usecs * COAL_CLOCKS_PER_USEC; + if (clks > MAX_COAL_TIME_THRESHOLD) + clks = MAX_COAL_TIME_THRESHOLD; + if (count > MAX_COAL_IO_COUNT) + count = MAX_COAL_IO_COUNT; + } + + spin_lock_irqsave(&host->lock, flags); + mv_set_main_irq_mask(host, coal_disable, 0); + + if (is_dual_hc && !IS_GEN_I(hpriv)) { + /* + * GEN_II/GEN_IIE with dual host controllers: + * one set of global thresholds for the entire chip. + */ + writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD); + writel(count, mmio + IRQ_COAL_IO_THRESHOLD); + /* clear leftover coal IRQ bit */ + writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); + if (count) + coal_enable = ALL_PORTS_COAL_DONE; + clks = count = 0; /* force clearing of regular regs below */ + } + + /* + * All chips: independent thresholds for each HC on the chip. 
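+ * E.g. usecs=100 yields clks = 100 * COAL_CLOCKS_PER_USEC = 15000
+ * internal clocks (capped at MAX_COAL_TIME_THRESHOLD), computed
+ * once above and programmed into each HC here.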
+ */ + hc_mmio = mv_hc_base_from_port(mmio, 0); + writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); + writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); + writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); + if (count) + coal_enable |= PORTS_0_3_COAL_DONE; + if (is_dual_hc) { + hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); + writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); + writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); + writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); + if (count) + coal_enable |= PORTS_4_7_COAL_DONE; + } + + mv_set_main_irq_mask(host, 0, coal_enable); + spin_unlock_irqrestore(&host->lock, flags); +} + +/** + * mv_start_edma - Enable eDMA engine * @base: port base address * @pp: port private data * @@ -642,54 +1165,92 @@ static void mv_irq_clear(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) +static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, + struct mv_port_priv *pp, u8 protocol) { - if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { - writelfl(EDMA_EN, base + EDMA_CMD_OFS); + int want_ncq = (protocol == ATA_PROT_NCQ); + + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { + int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); + if (want_ncq != using_ncq) + mv_stop_edma(ap); + } + if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { + struct mv_host_priv *hpriv = ap->host->private_data; + + mv_edma_cfg(ap, want_ncq, 1); + + mv_set_edma_ptrs(port_mmio, hpriv, pp); + mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); + + writelfl(EDMA_EN, port_mmio + EDMA_CMD); pp->pp_flags |= MV_PP_FLAG_EDMA_EN; } - WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS))); +} + +static void mv_wait_for_edma_empty_idle(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); + const int per_loop = 5, timeout = (15 * 1000 / per_loop); + int i; + + /* + * Wait for the EDMA engine to finish transactions in progress. + * No idea what a good "timeout" value might be, but measurements + * indicate that it often requires hundreds of microseconds + * with two drives in-use. So we use the 15msec value above + * as a rough guess at what even more drives might require. + */ + for (i = 0; i < timeout; ++i) { + u32 edma_stat = readl(port_mmio + EDMA_STATUS); + if ((edma_stat & empty_idle) == empty_idle) + break; + udelay(per_loop); + } + /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */ } /** - * mv_stop_dma - Disable eDMA engine - * @ap: ATA channel to manipulate - * - * Verify the local cache of the eDMA state is accurate with a - * WARN_ON. + * mv_stop_edma_engine - Disable eDMA engine + * @port_mmio: io base address * * LOCKING: * Inherited from caller. */ -static void mv_stop_dma(struct ata_port *ap) +static int mv_stop_edma_engine(void __iomem *port_mmio) { - void __iomem *port_mmio = mv_ap_base(ap); - struct mv_port_priv *pp = ap->private_data; - u32 reg; int i; - if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { - /* Disable EDMA if active. The disable bit auto clears. - */ - writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); - pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; - } else { - WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); - } + /* Disable eDMA. The disable bit auto clears. 
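+ * The polling loop below gives the engine up to
+ * 10000 iterations * 10us = 100ms to acknowledge.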
*/ + writelfl(EDMA_DS, port_mmio + EDMA_CMD); - /* now properly wait for the eDMA to stop */ - for (i = 1000; i > 0; i--) { - reg = readl(port_mmio + EDMA_CMD_OFS); - if (!(EDMA_EN & reg)) { - break; - } - udelay(100); + /* Wait for the chip to confirm eDMA is off. */ + for (i = 10000; i > 0; i--) { + u32 reg = readl(port_mmio + EDMA_CMD); + if (!(reg & EDMA_EN)) + return 0; + udelay(10); } + return -EIO; +} - if (EDMA_EN & reg) { - ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); - /* FIXME: Consider doing a reset here to recover */ +static int mv_stop_edma(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + int err = 0; + + if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) + return 0; + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + mv_wait_for_edma_empty_idle(ap); + if (mv_stop_edma_engine(port_mmio)) { + ata_port_err(ap, "Unable to stop eDMA\n"); + err = -EIO; } + mv_edma_cfg(ap, 0, 0); + return err; } #ifdef ATA_DEBUG @@ -699,14 +1260,14 @@ static void mv_dump_mem(void __iomem *start, unsigned bytes) for (b = 0; b < bytes; ) { DPRINTK("%p: ", start + b); for (w = 0; b < bytes && w < 4; w++) { - printk("%08x ",readl(start + b)); + printk("%08x ", readl(start + b)); b += sizeof(u32); } printk("\n"); } } #endif - +#if defined(ATA_DEBUG) || defined(CONFIG_PCI) static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) { #ifdef ATA_DEBUG @@ -715,14 +1276,15 @@ static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) for (b = 0; b < bytes; ) { DPRINTK("%02x: ", b); for (w = 0; b < bytes && w < 4; w++) { - (void) pci_read_config_dword(pdev,b,&dw); - printk("%08x ",dw); + (void) pci_read_config_dword(pdev, b, &dw); + printk("%08x ", dw); b += sizeof(u32); } printk("\n"); } #endif } +#endif static void mv_dump_all_regs(void __iomem *mmio_base, int port, struct pci_dev *pdev) { @@ -760,9 +1322,9 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port, } for (p = start_port; p < start_port + num_ports; p++) { port_base = mv_port_base(mmio_base, p); - DPRINTK("EDMA regs (port %i):\n",p); + DPRINTK("EDMA regs (port %i):\n", p); mv_dump_mem(port_base, 0x54); - DPRINTK("SATA regs (port %i):\n",p); + DPRINTK("SATA regs (port %i):\n", p); mv_dump_mem(port_base+0x300, 0x60); } #endif @@ -776,10 +1338,10 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in) case SCR_STATUS: case SCR_CONTROL: case SCR_ERROR: - ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32)); + ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); break; case SCR_ACTIVE: - ofs = SATA_ACTIVE_OFS; /* active is not with the others */ + ofs = SATA_ACTIVE; /* active is not with the others */ break; default: ofs = 0xffffffffU; @@ -788,80 +1350,346 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in) return ofs; } -static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in) +static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) { unsigned int ofs = mv_scr_offset(sc_reg_in); - if (0xffffffffU != ofs) { - return readl(mv_ap_base(ap) + ofs); - } else { - return (u32) ofs; - } + if (ofs != 0xffffffffU) { + *val = readl(mv_ap_base(link->ap) + ofs); + return 0; + } else + return -EINVAL; } -static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) +static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) { unsigned int ofs = mv_scr_offset(sc_reg_in); - if (0xffffffffU != ofs) { - writelfl(val, mv_ap_base(ap) + ofs); + if (ofs != 0xffffffffU) { + void __iomem *addr = 
			mv_ap_base(link->ap) + ofs;
+		struct mv_host_priv *hpriv = link->ap->host->private_data;
+		if (sc_reg_in == SCR_CONTROL) {
+			/*
+			 * Workaround for 88SX60x1 FEr SATA#26:
+			 *
+			 * COMRESETs have to take care not to accidentally
+			 * put the drive to sleep when writing SCR_CONTROL.
+			 * Setting bits 12..15 prevents this problem.
+			 *
+			 * So if we see an outbound COMRESET, set those bits.
+			 * Ditto for the follow-up write that clears the reset.
+			 *
+			 * The proprietary driver does this for
+			 * all chip versions, and so do we.
+			 */
+			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
+				val |= 0xf000;
+
+			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
+				void __iomem *lp_phy_addr =
+					mv_ap_base(link->ap) + LP_PHY_CTL;
+				/*
+				 * Set PHY speed according to SControl speed.
+				 */
+				if ((val & 0xf0) == 0x10)
+					writelfl(0x7, lp_phy_addr);
+				else
+					writelfl(0x227, lp_phy_addr);
+			}
+		}
+		writelfl(val, addr);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+static void mv6_dev_config(struct ata_device *adev)
+{
+	/*
+	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+	 *
+	 * Gen-II does not support NCQ over a port multiplier
+	 * (no FIS-based switching).
+	 */
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		if (sata_pmp_attached(adev->link->ap)) {
+			adev->flags &= ~ATA_DFLAG_NCQ;
+			ata_dev_info(adev,
+				"NCQ disabled for command-based switching\n");
+		}
+	}
+}
+
+static int mv_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+	struct mv_port_priv *pp = ap->private_data;
+
+	/*
+	 * Don't allow new commands if we're in a delayed EH state
+	 * for NCQ and/or FIS-based switching.
+	 */
+	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+		return ATA_DEFER_PORT;
+
+	/* PIO commands need exclusive link: no other commands [DMA or PIO]
+	 * can run concurrently.
+	 * Set excl_link when we want to send a PIO command in DMA mode
+	 * or a non-NCQ command in NCQ mode.
+	 * When we receive a command from that link, and there are no
+	 * outstanding commands, mark a flag to clear excl_link and let
+	 * the command go through.
+	 */
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+			return 0;
+		} else
+			return ATA_DEFER_PORT;
+	}
+
+	/*
+	 * If the port is completely idle, then allow the new qc.
+	 */
+	if (ap->nr_active_links == 0)
+		return 0;
+
+	/*
+	 * The port is operating in host queuing mode (EDMA) with NCQ
+	 * enabled, allow multiple NCQ commands. EDMA also allows
+	 * queueing multiple DMA commands but libata core currently
+	 * doesn't allow it.
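
[Editor's note: illustrative sketch, not part of this patch. The comment above describes the admission rule that the code just below implements. Here it is as a toy decision function: the PP_* flags are shortened stand-ins for the driver's MV_PP_FLAG_* bits, with arbitrary values, and active_cmds loosely plays the role of ap->nr_active_links.]

#include <stdio.h>

#define PP_EDMA_EN 0x1
#define PP_NCQ_EN  0x2

static const char *admit(unsigned int pp_flags, int cmd_is_ncq, int active_cmds)
{
	if (active_cmds == 0)
		return "issue";		/* an idle port accepts anything */
	if ((pp_flags & PP_EDMA_EN) && (pp_flags & PP_NCQ_EN) && cmd_is_ncq)
		return "issue";		/* NCQ may pile onto NCQ under EDMA */
	return "defer";			/* everything else waits its turn */
}

int main(void)
{
	printf("%s\n", admit(PP_EDMA_EN | PP_NCQ_EN, 1, 4));	/* issue */
	printf("%s\n", admit(PP_EDMA_EN | PP_NCQ_EN, 0, 4));	/* defer */
	printf("%s\n", admit(0, 0, 0));				/* issue */
	return 0;
}
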
+ */ + if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && + (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { + if (ata_is_ncq(qc->tf.protocol)) + return 0; + else { + ap->excl_link = link; + return ATA_DEFER_PORT; + } } + + return ATA_DEFER_PORT; +} + +static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) +{ + struct mv_port_priv *pp = ap->private_data; + void __iomem *port_mmio; + + u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; + u32 ltmode, *old_ltmode = &pp->cached.ltmode; + u32 haltcond, *old_haltcond = &pp->cached.haltcond; + + ltmode = *old_ltmode & ~LTMODE_BIT8; + haltcond = *old_haltcond | EDMA_ERR_DEV; + + if (want_fbs) { + fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; + ltmode = *old_ltmode | LTMODE_BIT8; + if (want_ncq) + haltcond &= ~EDMA_ERR_DEV; + else + fiscfg |= FISCFG_WAIT_DEV_ERR; + } else { + fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); + } + + port_mmio = mv_ap_base(ap); + mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg); + mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode); + mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond); +} + +static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) +{ + struct mv_host_priv *hpriv = ap->host->private_data; + u32 old, new; + + /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ + old = readl(hpriv->base + GPIO_PORT_CTL); + if (want_ncq) + new = old | (1 << 22); + else + new = old & ~(1 << 22); + if (new != old) + writel(new, hpriv->base + GPIO_PORT_CTL); } /** - * mv_host_stop - Host specific cleanup/stop routine. - * @host: host data structure + * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma + * @ap: Port being initialized * - * Disable ints, cleanup host memory, call general purpose - * host_stop. + * There are two DMA modes on these chips: basic DMA, and EDMA. * - * LOCKING: - * Inherited from caller. + * Bit-0 of the "EDMA RESERVED" register enables/disables use + * of basic DMA on the GEN_IIE versions of the chips. + * + * This bit survives EDMA resets, and must be set for basic DMA + * to function, and should be cleared when EDMA is active. + */ +static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) +{ + struct mv_port_priv *pp = ap->private_data; + u32 new, *old = &pp->cached.unknown_rsvd; + + if (enable_bmdma) + new = *old | 1; + else + new = *old & ~1; + mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); +} + +/* + * SOC chips have an issue whereby the HDD LEDs don't always blink + * during I/O when NCQ is enabled. Enabling a special "LED blink" mode + * of the SOC takes care of it, generating a steady blink rate when + * any drive on the chip is active. + * + * Unfortunately, the blink mode is a global hardware setting for the SOC, + * so we must use it whenever at least one port on the SOC has NCQ enabled. + * + * We turn "LED blink" off when NCQ is not in use anywhere, because the normal + * LED operation works then, and provides better (more accurate) feedback. + * + * Note that this code assumes that an SOC never has more than one HC onboard. 
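
[Editor's note: illustrative sketch, not part of this patch. mv_config_fbs() and mv_bmdma_enable_iie() above funnel their register updates through mv_write_cached_reg(), whose definition is elsewhere in the patch; judging from the call sites it touches the hardware only when the cached value actually changes. A userspace rendering of that write-if-changed idiom, with an assumed signature and a plain variable standing in for the MMIO register:]

#include <stdio.h>

typedef unsigned int u32;

static unsigned int mmio_writes;	/* count simulated register writes */

static void write_cached_reg(u32 *hw_reg, u32 *cached, u32 new_val)
{
	if (new_val != *cached) {
		*cached = new_val;
		*hw_reg = new_val;	/* writel(new_val, addr) in the driver */
		mmio_writes++;
	}
}

int main(void)
{
	u32 hw = 0, cache = 0;

	write_cached_reg(&hw, &cache, 0x80);	/* real write */
	write_cached_reg(&hw, &cache, 0x80);	/* elided: value unchanged */
	printf("hw=0x%x, writes=%u\n", hw, mmio_writes);	/* hw=0x80, writes=1 */
	return 0;
}

The point of the caching is to avoid redundant (and on some buses, slow) MMIO writes on hot paths such as mv_edma_cfg().
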
*/ -static void mv_host_stop(struct ata_host *host) +static void mv_soc_led_blink_enable(struct ata_port *ap) { + struct ata_host *host = ap->host; struct mv_host_priv *hpriv = host->private_data; - struct pci_dev *pdev = to_pci_dev(host->dev); + void __iomem *hc_mmio; + u32 led_ctrl; - if (hpriv->hp_flags & MV_HP_FLAG_MSI) { - pci_disable_msi(pdev); - } else { - pci_intx(pdev, 0); - } - kfree(hpriv); - ata_host_stop(host); + if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) + return; + hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; + hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); + led_ctrl = readl(hc_mmio + SOC_LED_CTRL); + writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); } -static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev) +static void mv_soc_led_blink_disable(struct ata_port *ap) { - dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); + struct ata_host *host = ap->host; + struct mv_host_priv *hpriv = host->private_data; + void __iomem *hc_mmio; + u32 led_ctrl; + unsigned int port; + + if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) + return; + + /* disable led-blink only if no ports are using NCQ */ + for (port = 0; port < hpriv->n_ports; port++) { + struct ata_port *this_ap = host->ports[port]; + struct mv_port_priv *pp = this_ap->private_data; + + if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) + return; + } + + hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; + hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); + led_ctrl = readl(hc_mmio + SOC_LED_CTRL); + writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); } -static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) +static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) { - u32 cfg = readl(port_mmio + EDMA_CFG_OFS); + u32 cfg; + struct mv_port_priv *pp = ap->private_data; + struct mv_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = mv_ap_base(ap); /* set up non-NCQ EDMA configuration */ - cfg &= ~0x1f; /* clear queue depth */ - cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */ - cfg &= ~(1 << 9); /* disable equeue */ + cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ + pp->pp_flags &= + ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); if (IS_GEN_I(hpriv)) cfg |= (1 << 8); /* enab config burst size mask */ - else if (IS_GEN_II(hpriv)) + else if (IS_GEN_II(hpriv)) { cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; + mv_60x1_errata_sata25(ap, want_ncq); + + } else if (IS_GEN_IIE(hpriv)) { + int want_fbs = sata_pmp_attached(ap); + /* + * Possible future enhancement: + * + * The chip can use FBS with non-NCQ, if we allow it, + * But first we need to have the error handling in place + * for this mode (datasheet section 7.3.15.4.2.3). + * So disallow non-NCQ FBS for now. 
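
[Editor's note: illustrative sketch, not part of this patch. The next added line, want_fbs &= want_ncq, implements the restriction described in the comment above: FIS-based switching is engaged only when NCQ is also wanted. The full truth table, as a runnable demo:]

#include <stdio.h>

int main(void)
{
	int want_ncq, want_fbs;

	for (want_ncq = 0; want_ncq <= 1; want_ncq++)
		for (want_fbs = 0; want_fbs <= 1; want_fbs++)
			printf("want_ncq=%d want_fbs=%d -> fbs=%d\n",
			       want_ncq, want_fbs, want_fbs & want_ncq);
	return 0;	/* only the 1/1 case keeps FBS enabled */
}
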
+ */ + want_fbs &= want_ncq; + + mv_config_fbs(ap, want_ncq, want_fbs); + + if (want_fbs) { + pp->pp_flags |= MV_PP_FLAG_FBS_EN; + cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ + } + + cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ + if (want_edma) { + cfg |= (1 << 22); /* enab 4-entry host queue cache */ + if (!IS_SOC(hpriv)) + cfg |= (1 << 18); /* enab early completion */ + } + if (hpriv->hp_flags & MV_HP_CUT_THROUGH) + cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ + mv_bmdma_enable_iie(ap, !want_edma); + + if (IS_SOC(hpriv)) { + if (want_ncq) + mv_soc_led_blink_enable(ap); + else + mv_soc_led_blink_disable(ap); + } + } - else if (IS_GEN_IIE(hpriv)) { - cfg |= (1 << 23); /* dis RX PM port mask */ - cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ - cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */ - cfg |= (1 << 18); /* enab early completion */ - cfg |= (1 << 17); /* enab host q cache */ - cfg |= (1 << 22); /* enab cutthrough */ + if (want_ncq) { + cfg |= EDMA_CFG_NCQ; + pp->pp_flags |= MV_PP_FLAG_NCQ_EN; } - writelfl(cfg, port_mmio + EDMA_CFG_OFS); + writelfl(cfg, port_mmio + EDMA_CFG); +} + +static void mv_port_free_dma_mem(struct ata_port *ap) +{ + struct mv_host_priv *hpriv = ap->host->private_data; + struct mv_port_priv *pp = ap->private_data; + int tag; + + if (pp->crqb) { + dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); + pp->crqb = NULL; + } + if (pp->crpb) { + dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); + pp->crpb = NULL; + } + /* + * For GEN_I, there's no NCQ, so we have only a single sg_tbl. + * For later hardware, we have one unique sg_tbl per NCQ tag. + */ + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { + if (pp->sg_tbl[tag]) { + if (tag == 0 || !IS_GEN_I(hpriv)) + dma_pool_free(hpriv->sg_tbl_pool, + pp->sg_tbl[tag], + pp->sg_tbl_dma[tag]); + pp->sg_tbl[tag] = NULL; + } + } } /** @@ -879,84 +1707,53 @@ static int mv_port_start(struct ata_port *ap) struct device *dev = ap->host->dev; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp; - void __iomem *port_mmio = mv_ap_base(ap); - void *mem; - dma_addr_t mem_dma; - int rc = -ENOMEM; + unsigned long flags; + int tag; - pp = kmalloc(sizeof(*pp), GFP_KERNEL); + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) - goto err_out; - memset(pp, 0, sizeof(*pp)); - - mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, - GFP_KERNEL); - if (!mem) - goto err_out_pp; - memset(mem, 0, MV_PORT_PRIV_DMA_SZ); - - rc = ata_pad_alloc(ap, dev); - if (rc) - goto err_out_priv; - - /* First item in chunk of DMA memory: - * 32-slot command request table (CRQB), 32 bytes each in size - */ - pp->crqb = mem; - pp->crqb_dma = mem_dma; - mem += MV_CRQB_Q_SZ; - mem_dma += MV_CRQB_Q_SZ; - - /* Second item: - * 32-slot command response table (CRPB), 8 bytes each in size - */ - pp->crpb = mem; - pp->crpb_dma = mem_dma; - mem += MV_CRPB_Q_SZ; - mem_dma += MV_CRPB_Q_SZ; + return -ENOMEM; + ap->private_data = pp; - /* Third item: - * Table of scatter-gather descriptors (ePRD), 16 bytes each + pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); + if (!pp->crqb) + return -ENOMEM; + memset(pp->crqb, 0, MV_CRQB_Q_SZ); + + pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); + if (!pp->crpb) + goto out_port_free_dma_mem; + memset(pp->crpb, 0, MV_CRPB_Q_SZ); + + /* 6041/6081 Rev. 
"C0" (and newer) are okay with async notify */ + if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) + ap->flags |= ATA_FLAG_AN; + /* + * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. + * For later hardware, we need one unique sg_tbl per NCQ tag. */ - pp->sg_tbl = mem; - pp->sg_tbl_dma = mem_dma; - - mv_edma_cfg(hpriv, port_mmio); - - writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); - writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, - port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crqb_dma & 0xffffffff, - port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); - - writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crpb_dma & 0xffffffff, - port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); + for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { + if (tag == 0 || !IS_GEN_I(hpriv)) { + pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, + GFP_KERNEL, &pp->sg_tbl_dma[tag]); + if (!pp->sg_tbl[tag]) + goto out_port_free_dma_mem; + } else { + pp->sg_tbl[tag] = pp->sg_tbl[0]; + pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; + } + } - writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, - port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + spin_lock_irqsave(ap->lock, flags); + mv_save_cached_regs(ap); + mv_edma_cfg(ap, 0, 0); + spin_unlock_irqrestore(ap->lock, flags); - /* Don't turn on EDMA here...do it before DMA commands only. Else - * we'll be unable to send non-data, PIO, etc due to restricted access - * to shadow regs. - */ - ap->private_data = pp; return 0; -err_out_priv: - mv_priv_free(pp, dev); -err_out_pp: - kfree(pp); -err_out: - return rc; +out_port_free_dma_mem: + mv_port_free_dma_mem(ap); + return -ENOMEM; } /** @@ -970,18 +1767,13 @@ err_out: */ static void mv_port_stop(struct ata_port *ap) { - struct device *dev = ap->host->dev; - struct mv_port_priv *pp = ap->private_data; unsigned long flags; - spin_lock_irqsave(&ap->host->lock, flags); - mv_stop_dma(ap); - spin_unlock_irqrestore(&ap->host->lock, flags); - - ap->private_data = NULL; - ata_pad_free(ap, dev); - mv_priv_free(pp, dev); - kfree(pp); + spin_lock_irqsave(ap->lock, flags); + mv_stop_edma(ap); + mv_enable_port_irqs(ap, 0); + spin_unlock_irqrestore(ap->lock, flags); + mv_port_free_dma_mem(ap); } /** @@ -996,43 +1788,41 @@ static void mv_port_stop(struct ata_port *ap) static void mv_fill_sg(struct ata_queued_cmd *qc) { struct mv_port_priv *pp = qc->ap->private_data; - unsigned int i = 0; struct scatterlist *sg; + struct mv_sg *mv_sg, *last_sg = NULL; + unsigned int si; - ata_for_each_sg(sg, qc) { - dma_addr_t addr; - u32 sg_len, len, offset; - - addr = sg_dma_address(sg); - sg_len = sg_dma_len(sg); + mv_sg = pp->sg_tbl[qc->tag]; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_addr_t addr = sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); while (sg_len) { - offset = addr & MV_DMA_BOUNDARY; - len = sg_len; - if ((offset + sg_len) > 0x10000) + u32 offset = addr & 0xffff; + u32 len = sg_len; + + if (offset + len > 0x10000) len = 0x10000 - offset; - pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); - pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); - pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff); + mv_sg->addr = cpu_to_le32(addr & 0xffffffff); + mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); + mv_sg->flags_size = cpu_to_le32(len & 0xffff); + mv_sg->reserved = 0; sg_len -= len; addr += len; - 
if (!sg_len && ata_sg_is_last(sg, qc)) - pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); - - i++; + last_sg = mv_sg; + mv_sg++; } } -} -static inline unsigned mv_inc_q_index(unsigned index) -{ - return (index + 1) & MV_MAX_Q_DEPTH_MASK; + if (likely(last_sg)) + last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); + mb(); /* ensure data structure is visible to the chipset */ } -static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) +static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) { u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | (last ? CRQB_CMD_LAST : 0); @@ -1040,6 +1830,199 @@ static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned las } /** + * mv_sff_irq_clear - Clear hardware interrupt after DMA. + * @ap: Port associated with this ATA transaction. + * + * We need this only for ATAPI bmdma transactions, + * as otherwise we experience spurious interrupts + * after libata-sff handles the bmdma interrupts. + */ +static void mv_sff_irq_clear(struct ata_port *ap) +{ + mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); +} + +/** + * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. + * @qc: queued command to check for chipset/DMA compatibility. + * + * The bmdma engines cannot handle speculative data sizes + * (bytecount under/over flow). So only allow DMA for + * data transfer commands with known data sizes. + * + * LOCKING: + * Inherited from caller. + */ +static int mv_check_atapi_dma(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *scmd = qc->scsicmd; + + if (scmd) { + switch (scmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case GPCMD_READ_CD: + case GPCMD_SEND_DVD_STRUCTURE: + case GPCMD_SEND_CUE_SHEET: + return 0; /* DMA is safe */ + } + } + return -EOPNOTSUPP; /* use PIO instead */ +} + +/** + * mv_bmdma_setup - Set up BMDMA transaction + * @qc: queued command to prepare DMA for. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + + mv_fill_sg(qc); + + /* clear all DMA cmd bits */ + writel(0, port_mmio + BMDMA_CMD); + + /* load PRD table addr. */ + writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, + port_mmio + BMDMA_PRD_HIGH); + writelfl(pp->sg_tbl_dma[qc->tag], + port_mmio + BMDMA_PRD_LOW); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +/** + * mv_bmdma_start - Start a BMDMA transaction + * @qc: queued command to start DMA on. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = mv_ap_base(ap); + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; + + /* start host DMA transaction */ + writelfl(cmd, port_mmio + BMDMA_CMD); +} + +/** + * mv_bmdma_stop - Stop BMDMA transfer + * @qc: queued command to stop DMA on. + * + * Clears the ATA_DMA_START flag in the bmdma control register + * + * LOCKING: + * Inherited from caller. 
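
[Editor's note: illustrative sketch, not part of this patch. The inner loop of the reworked mv_fill_sg() (earlier in this hunk) clips each scatter segment so that no ePRD entry crosses a 64 KiB address boundary. The same arithmetic, extracted into a standalone, runnable demo:]

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0xfff0;	/* deliberately near a boundary */
	unsigned int sg_len = 0x23000;

	while (sg_len) {
		unsigned int offset = addr & 0xffff;
		unsigned int len = sg_len;

		if (offset + len > 0x10000)
			len = 0x10000 - offset;	/* clip at the 64 KiB boundary */

		printf("ePRD: addr=0x%llx len=0x%x\n", addr, len);
		sg_len -= len;
		addr += len;
	}
	return 0;
}

This emits four entries (0x10, 0x10000, 0x10000, 0x2ff0 bytes), none of which straddles a 64 KiB boundary, which is exactly the constraint the hardware ePRD table imposes.
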
+ */ +static void mv_bmdma_stop_ap(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + u32 cmd; + + /* clear start/stop bit */ + cmd = readl(port_mmio + BMDMA_CMD); + if (cmd & ATA_DMA_START) { + cmd &= ~ATA_DMA_START; + writelfl(cmd, port_mmio + BMDMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); + } +} + +static void mv_bmdma_stop(struct ata_queued_cmd *qc) +{ + mv_bmdma_stop_ap(qc->ap); +} + +/** + * mv_bmdma_status - Read BMDMA status + * @ap: port for which to retrieve DMA status. + * + * Read and return equivalent of the sff BMDMA status register. + * + * LOCKING: + * Inherited from caller. + */ +static u8 mv_bmdma_status(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + u32 reg, status; + + /* + * Other bits are valid only if ATA_DMA_ACTIVE==0, + * and the ATA_DMA_INTR bit doesn't exist. + */ + reg = readl(port_mmio + BMDMA_STATUS); + if (reg & ATA_DMA_ACTIVE) + status = ATA_DMA_ACTIVE; + else if (reg & ATA_DMA_ERR) + status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; + else { + /* + * Just because DMA_ACTIVE is 0 (DMA completed), + * this does _not_ mean the device is "done". + * So we should not yet be signalling ATA_DMA_INTR + * in some cases. Eg. DSM/TRIM, and perhaps others. + */ + mv_bmdma_stop_ap(ap); + if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) + status = 0; + else + status = ATA_DMA_INTR; + } + return status; +} + +static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *tf = &qc->tf; + /* + * Workaround for 88SX60x1 FEr SATA#24. + * + * Chip may corrupt WRITEs if multi_count >= 4kB. + * Note that READs are unaffected. + * + * It's not clear if this errata really means "4K bytes", + * or if it always happens for multi_count > 7 + * regardless of device sector_size. + * + * So, for safety, any write with multi_count > 7 + * gets converted here into a regular PIO write instead: + */ + if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { + if (qc->dev->multi_count > 7) { + switch (tf->command) { + case ATA_CMD_WRITE_MULTI: + tf->command = ATA_CMD_PIO_WRITE; + break; + case ATA_CMD_WRITE_MULTI_FUA_EXT: + tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ + /* fall through */ + case ATA_CMD_WRITE_MULTI_EXT: + tf->command = ATA_CMD_PIO_WRITE_EXT; + break; + } + } + } +} + +/** * mv_qc_prep - Host specific command preparation. 
* @qc: queued command to prepare * @@ -1056,38 +2039,49 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; __le16 *cw; - struct ata_taskfile *tf; + struct ata_taskfile *tf = &qc->tf; u16 flags = 0; unsigned in_index; - if (ATA_PROT_DMA != qc->tf.protocol) + switch (tf->protocol) { + case ATA_PROT_DMA: + if (tf->command == ATA_CMD_DSM) + return; + /* fall-thru */ + case ATA_PROT_NCQ: + break; /* continue below */ + case ATA_PROT_PIO: + mv_rw_multi_errata_sata24(qc); return; + default: + return; + } /* Fill in command request block */ - if (!(qc->tf.flags & ATA_TFLAG_WRITE)) + if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; + flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + /* get current queue index from software */ + in_index = pp->req_idx; pp->crqb[in_index].sg_addr = - cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); + cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); pp->crqb[in_index].sg_addr_hi = - cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); + cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); cw = &pp->crqb[in_index].ata_cmd[0]; - tf = &qc->tf; - /* Sadly, the CRQB cannot accomodate all registers--there are + /* Sadly, the CRQB cannot accommodate all registers--there are * only 11 bytes...so we must pick and choose required * registers based on the command. So, we drop feature and * hob_feature for [RW] DMA commands, but they are needed for - * NCQ. NCQ will drop hob_nsect. + * NCQ. NCQ will drop hob_nsect, which is not needed there + * (nsect is used only for the tag; feat/hob_feat hold true nsect). 
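
[Editor's note: illustrative sketch, not part of this patch. The CRQB ATA-command words filled in below are packed by mv_crqb_pack_cmd(), defined earlier in this hunk. Here is the same packing standalone; the CRQB_CMD_* shift and flag values are assumptions for the demo, since the real definitions live in the driver's register header, which is not part of this hunk.]

#include <stdio.h>

#define CRQB_CMD_ADDR_SHIFT	8		/* assumed for the demo */
#define CRQB_CMD_CS		(1 << 11)	/* assumed */
#define CRQB_CMD_LAST		(1 << 15)	/* assumed */

static unsigned short pack_cmd(unsigned char data, unsigned char addr, int last)
{
	return data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
	       (last ? CRQB_CMD_LAST : 0);
}

int main(void)
{
	/* the final word of a request: the command register write itself */
	printf("0x%04x\n", pack_cmd(0xca, 7, 1));	/* 0xca = WRITE DMA */
	return 0;
}

Each 16-bit word thus encodes one shadow-register write (value, register address, and a "last" marker), which is why the comment above notes that only 11 such bytes fit and registers must be picked per command.
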
*/ switch (tf->command) { case ATA_CMD_READ: @@ -1097,13 +2091,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) case ATA_CMD_WRITE_FUA_EXT: mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); break; -#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_WRITE: mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); break; -#endif /* FIXME: remove this line when NCQ added */ default: /* The only other commands EDMA supports in non-queued and * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none @@ -1148,31 +2140,33 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; struct mv_crqb_iie *crqb; - struct ata_taskfile *tf; + struct ata_taskfile *tf = &qc->tf; unsigned in_index; u32 flags = 0; - if (ATA_PROT_DMA != qc->tf.protocol) + if ((tf->protocol != ATA_PROT_DMA) && + (tf->protocol != ATA_PROT_NCQ)) return; + if (tf->command == ATA_CMD_DSM) + return; /* use bmdma for this */ - /* Fill in Gen IIE command request block - */ - if (!(qc->tf.flags & ATA_TFLAG_WRITE)) + /* Fill in Gen IIE command request block */ + if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; + flags |= qc->tag << CRQB_HOSTQ_SHIFT; + flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + /* get current queue index from software */ + in_index = pp->req_idx; crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; - crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); - crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); + crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); + crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); crqb->flags = cpu_to_le32(flags); - tf = &qc->tf; crqb->ata_cmd[0] = cpu_to_le32( (tf->command << 16) | (tf->feature << 24) @@ -1200,6 +2194,131 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) } /** + * mv_sff_check_status - fetch device status, if valid + * @ap: ATA port to fetch status from + * + * When using command issue via mv_qc_issue_fis(), + * the initial ATA_BUSY state does not show up in the + * ATA status (shadow) register. This can confuse libata! + * + * So we have a hook here to fake ATA_BUSY for that situation, + * until the first time a BUSY, DRQ, or ERR bit is seen. + * + * The rest of the time, it simply returns the ATA status register. 
+ */ +static u8 mv_sff_check_status(struct ata_port *ap) +{ + u8 stat = ioread8(ap->ioaddr.status_addr); + struct mv_port_priv *pp = ap->private_data; + + if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { + if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) + pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; + else + stat = ATA_BUSY; + } + return stat; +} + +/** + * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register + * @fis: fis to be sent + * @nwords: number of 32-bit words in the fis + */ +static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) +{ + void __iomem *port_mmio = mv_ap_base(ap); + u32 ifctl, old_ifctl, ifstat; + int i, timeout = 200, final_word = nwords - 1; + + /* Initiate FIS transmission mode */ + old_ifctl = readl(port_mmio + SATA_IFCTL); + ifctl = 0x100 | (old_ifctl & 0xf); + writelfl(ifctl, port_mmio + SATA_IFCTL); + + /* Send all words of the FIS except for the final word */ + for (i = 0; i < final_word; ++i) + writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); + + /* Flag end-of-transmission, and then send the final word */ + writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); + writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); + + /* + * Wait for FIS transmission to complete. + * This typically takes just a single iteration. + */ + do { + ifstat = readl(port_mmio + SATA_IFSTAT); + } while (!(ifstat & 0x1000) && --timeout); + + /* Restore original port configuration */ + writelfl(old_ifctl, port_mmio + SATA_IFCTL); + + /* See if it worked */ + if ((ifstat & 0x3000) != 0x1000) { + ata_port_warn(ap, "%s transmission error, ifstat=%08x\n", + __func__, ifstat); + return AC_ERR_OTHER; + } + return 0; +} + +/** + * mv_qc_issue_fis - Issue a command directly as a FIS + * @qc: queued command to start + * + * Note that the ATA shadow registers are not updated + * after command issue, so the device will appear "READY" + * if polled, even while it is BUSY processing the command. + * + * So we use a status hook to fake ATA_BUSY until the drive changes state. + * + * Note: we don't get updated shadow regs on *completion* + * of non-data commands. So avoid sending them via this function, + * as they will appear to have completed immediately. + * + * GEN_IIE has special registers that we could get the result tf from, + * but earlier chipsets do not. For now, we ignore those registers. 
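
[Editor's note: illustrative sketch, not part of this patch. mv_sff_check_status() above latches a fake BUSY status after FIS-based command issue, until the drive first shows BUSY, DRQ, or ERR. The latch, reduced to a runnable userspace state machine; the ATA_* status bit values match libata's definitions.]

#include <stdio.h>

#define ATA_BUSY 0x80
#define ATA_DRQ  0x08
#define ATA_ERR  0x01

static int fake_busy = 1;	/* set when a FIS was just sent */

static unsigned char check_status(unsigned char hw_stat)
{
	if (fake_busy) {
		if (hw_stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			fake_busy = 0;	/* real drive state visible: stop faking */
		else
			hw_stat = ATA_BUSY;
	}
	return hw_stat;
}

int main(void)
{
	printf("0x%02x\n", check_status(0x50));	/* 0x80: still faking BUSY */
	printf("0x%02x\n", check_status(0x58));	/* 0x58: DRQ seen, latch clears */
	printf("0x%02x\n", check_status(0x50));	/* 0x50: real status from now on */
	return 0;
}
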
+ */ +static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct mv_port_priv *pp = ap->private_data; + struct ata_link *link = qc->dev->link; + u32 fis[5]; + int err = 0; + + ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); + err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); + if (err) + return err; + + switch (qc->tf.protocol) { + case ATAPI_PROT_PIO: + pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; + /* fall through */ + case ATAPI_PROT_NODATA: + ap->hsm_task_state = HSM_ST_FIRST; + break; + case ATA_PROT_PIO: + pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; + if (qc->tf.flags & ATA_TFLAG_WRITE) + ap->hsm_task_state = HSM_ST_FIRST; + else + ap->hsm_task_state = HSM_ST; + break; + default: + ap->hsm_task_state = HSM_ST_LAST; + break; + } + + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_sff_queue_pio_task(link, 0); + return 0; +} + +/** * mv_qc_issue - Initiate a command to the host * @qc: queued command to start * @@ -1213,230 +2332,666 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) */ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) { - void __iomem *port_mmio = mv_ap_base(qc->ap); - struct mv_port_priv *pp = qc->ap->private_data; - unsigned in_index; - u32 in_ptr; + static int limit_warnings = 10; + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + u32 in_index; + unsigned int port_irqs; + + pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ - if (ATA_PROT_DMA != qc->tf.protocol) { - /* We're about to send a non-EDMA capable command to the - * port. Turn off EDMA so there won't be problems accessing - * shadow block, etc registers. + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + if (qc->tf.command == ATA_CMD_DSM) { + if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ + return AC_ERR_OTHER; + break; /* use bmdma for this */ + } + /* fall thru */ + case ATA_PROT_NCQ: + mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); + pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; + in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; + + /* Write the request in pointer to kick the EDMA to life */ + writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, + port_mmio + EDMA_REQ_Q_IN_PTR); + return 0; + + case ATA_PROT_PIO: + /* + * Errata SATA#16, SATA#24: warn if multiple DRQs expected. + * + * Someday, we might implement special polling workarounds + * for these, but it all seems rather unnecessary since we + * normally use only DMA for commands which transfer more + * than a single block of data. + * + * Much of the time, this could just work regardless. + * So for now, just log the incident, and allow the attempt. 
*/ - mv_stop_dma(qc->ap); - return ata_qc_issue_prot(qc); + if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { + --limit_warnings; + ata_link_warn(qc->dev->link, DRV_NAME + ": attempting PIO w/multiple DRQ: " + "this may fail due to h/w errata\n"); + } + /* drop through */ + case ATA_PROT_NODATA: + case ATAPI_PROT_PIO: + case ATAPI_PROT_NODATA: + if (ap->flags & ATA_FLAG_PIO_POLLING) + qc->tf.flags |= ATA_TFLAG_POLLING; + break; } - in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + if (qc->tf.flags & ATA_TFLAG_POLLING) + port_irqs = ERR_IRQ; /* mask device interrupt when polling */ + else + port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ - /* until we do queuing, the queue should be empty at this point */ - WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + /* + * We're about to send a non-EDMA capable command to the + * port. Turn off EDMA so there won't be problems accessing + * shadow block, etc registers. + */ + mv_stop_edma(ap); + mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); + mv_pmp_select(ap, qc->dev->link->pmp); + + if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { + struct mv_host_priv *hpriv = ap->host->private_data; + /* + * Workaround for 88SX60x1 FEr SATA#25 (part 2). + * + * After any NCQ error, the READ_LOG_EXT command + * from libata-eh *must* use mv_qc_issue_fis(). + * Otherwise it might fail, due to chip errata. + * + * Rather than special-case it, we'll just *always* + * use this method here for READ_LOG_EXT, making for + * easier testing. + */ + if (IS_GEN_II(hpriv)) + return mv_qc_issue_fis(qc); + } + return ata_bmdma_qc_issue(qc); +} - in_index = mv_inc_q_index(in_index); /* now incr producer index */ +static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) +{ + struct mv_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; - mv_start_dma(port_mmio, pp); + if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) + return NULL; + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) + return qc; + return NULL; +} - /* and write the request in pointer to kick the EDMA to life */ - in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; - in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; - writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); +static void mv_pmp_error_handler(struct ata_port *ap) +{ + unsigned int pmp, pmp_map; + struct mv_port_priv *pp = ap->private_data; - return 0; + if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { + /* + * Perform NCQ error analysis on failed PMPs + * before we freeze the port entirely. + * + * The failed PMPs are marked earlier by mv_pmp_eh_prep(). + */ + pmp_map = pp->delayed_eh_pmp_map; + pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; + for (pmp = 0; pmp_map != 0; pmp++) { + unsigned int this_pmp = (1 << pmp); + if (pmp_map & this_pmp) { + struct ata_link *link = &ap->pmp_link[pmp]; + pmp_map &= ~this_pmp; + ata_eh_analyze_ncq_error(link); + } + } + ata_port_freeze(ap); + } + sata_pmp_error_handler(ap); } -/** - * mv_get_crpb_status - get status from most recently completed cmd - * @ap: ATA channel to manipulate - * - * This routine is for use when the port is in DMA mode, when it - * will be using the CRPB (command response block) method of - * returning command completion information. We check indices - * are good, grab status, and bump the response consumer index to - * prove that we're up to date. - * - * LOCKING: - * Inherited from caller. 
- */ -static u8 mv_get_crpb_status(struct ata_port *ap) +static unsigned int mv_get_err_pmp_map(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); + + return readl(port_mmio + SATA_TESTCTL) >> 16; +} + +static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) +{ + struct ata_eh_info *ehi; + unsigned int pmp; + + /* + * Initialize EH info for PMPs which saw device errors + */ + ehi = &ap->link.eh_info; + for (pmp = 0; pmp_map != 0; pmp++) { + unsigned int this_pmp = (1 << pmp); + if (pmp_map & this_pmp) { + struct ata_link *link = &ap->pmp_link[pmp]; + + pmp_map &= ~this_pmp; + ehi = &link->eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "dev err"); + ehi->err_mask |= AC_ERR_DEV; + ehi->action |= ATA_EH_RESET; + ata_link_abort(link); + } + } +} + +static int mv_req_q_empty(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + u32 in_ptr, out_ptr; + + in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) + >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + return (in_ptr == out_ptr); /* 1 == queue_is_empty */ +} + +static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) +{ struct mv_port_priv *pp = ap->private_data; - unsigned out_index; - u32 out_ptr; - u8 ata_status; + int failed_links; + unsigned int old_map, new_map; + + /* + * Device error during FBS+NCQ operation: + * + * Set a port flag to prevent further I/O being enqueued. + * Leave the EDMA running to drain outstanding commands from this port. + * Perform the post-mortem/EH only when all responses are complete. + * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). + */ + if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { + pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; + pp->delayed_eh_pmp_map = 0; + } + old_map = pp->delayed_eh_pmp_map; + new_map = old_map | mv_get_err_pmp_map(ap); - out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + if (old_map != new_map) { + pp->delayed_eh_pmp_map = new_map; + mv_pmp_eh_prep(ap, new_map & ~old_map); + } + failed_links = hweight16(new_map); + + ata_port_info(ap, + "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n", + __func__, pp->delayed_eh_pmp_map, + ap->qc_active, failed_links, + ap->nr_active_links); + + if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { + mv_process_crpb_entries(ap, pp); + mv_stop_edma(ap); + mv_eh_freeze(ap); + ata_port_info(ap, "%s: done\n", __func__); + return 1; /* handled */ + } + ata_port_info(ap, "%s: waiting\n", __func__); + return 1; /* handled */ +} - ata_status = le16_to_cpu(pp->crpb[out_index].flags) - >> CRPB_FLAG_STATUS_SHIFT; +static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) +{ + /* + * Possible future enhancement: + * + * FBS+non-NCQ operation is not yet implemented. + * See related notes in mv_edma_cfg(). + * + * Device error during FBS+non-NCQ operation: + * + * We need to snapshot the shadow registers for each failed command. + * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). + */ + return 0; /* not handled */ +} - /* increment our consumer index... 
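
[Editor's note: illustrative sketch, not part of this patch. Both mv_pmp_error_handler() and mv_pmp_eh_prep() above walk a PMP bitmap with the same clear-as-you-go loop: visit each set bit, clear it, and stop once the map drains. The loop, extracted into a runnable demo:]

#include <stdio.h>

int main(void)
{
	unsigned int pmp_map = 0x15;	/* PMPs 0, 2 and 4 saw device errors */
	unsigned int pmp;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = 1u << pmp;
		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;
			printf("handle PMP link %u\n", pmp);
		}
	}
	return 0;
}
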
*/ - out_index = mv_inc_q_index(out_index); +static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) +{ + struct mv_port_priv *pp = ap->private_data; - /* and, until we do NCQ, there should only be 1 CRPB waiting */ - WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) - >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) + return 0; /* EDMA was not active: not handled */ + if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) + return 0; /* FBS was not active: not handled */ + + if (!(edma_err_cause & EDMA_ERR_DEV)) + return 0; /* non DEV error: not handled */ + edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; + if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) + return 0; /* other problems: not handled */ + + if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { + /* + * EDMA should NOT have self-disabled for this case. + * If it did, then something is wrong elsewhere, + * and we cannot handle it here. + */ + if (edma_err_cause & EDMA_ERR_SELF_DIS) { + ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", + __func__, edma_err_cause, pp->pp_flags); + return 0; /* not handled */ + } + return mv_handle_fbs_ncq_dev_err(ap); + } else { + /* + * EDMA should have self-disabled for this case. + * If it did not, then something is wrong elsewhere, + * and we cannot handle it here. + */ + if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { + ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", + __func__, edma_err_cause, pp->pp_flags); + return 0; /* not handled */ + } + return mv_handle_fbs_non_ncq_dev_err(ap); + } + return 0; /* not handled */ +} - /* write out our inc'd consumer index so EDMA knows we're caught up */ - out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; - out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; - writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); +static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + char *when = "idle"; - /* Return ATA status register for completed CRPB */ - return ata_status; + ata_ehi_clear_desc(ehi); + if (edma_was_enabled) { + when = "EDMA enabled"; + } else { + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) + when = "polling"; + } + ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); + ehi->err_mask |= AC_ERR_OTHER; + ehi->action |= ATA_EH_RESET; + ata_port_freeze(ap); } /** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate - * @reset_allowed: bool: 0 == don't trigger from reset here * - * In most cases, just clear the interrupt and move on. However, - * some cases require an eDMA reset, which is done right before - * the COMRESET in mv_phy_reset(). The SERR case requires a - * clear of pending errors in the SATA SERROR register. Finally, - * if the port disabled DMA, update our cached copy to match. + * Most cases require a full reset of the chip's state machine, + * which also performs a COMRESET. + * Also, if the port disabled DMA, update our cached copy to match. * * LOCKING: * Inherited from caller. 
*/ -static void mv_err_intr(struct ata_port *ap, int reset_allowed) +static void mv_err_intr(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + u32 edma_err_cause, eh_freeze_mask, serr = 0; + u32 fis_cause = 0; + struct mv_port_priv *pp = ap->private_data; + struct mv_host_priv *hpriv = ap->host->private_data; + unsigned int action = 0, err_mask = 0; + struct ata_eh_info *ehi = &ap->link.eh_info; + struct ata_queued_cmd *qc; + int abort = 0; + + /* + * Read and clear the SError and err_cause bits. + * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear + * the FIS_IRQ_CAUSE register before clearing edma_err_cause. + */ + sata_scr_read(&ap->link, SCR_ERROR, &serr); + sata_scr_write_flush(&ap->link, SCR_ERROR, serr); + + edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); + if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { + fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); + writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); + } + writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); + + if (edma_err_cause & EDMA_ERR_DEV) { + /* + * Device errors during FIS-based switching operation + * require special handling. + */ + if (mv_handle_dev_err(ap, edma_err_cause)) + return; + } + + qc = mv_get_active_qc(ap); + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", + edma_err_cause, pp->pp_flags); + + if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { + ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); + if (fis_cause & FIS_IRQ_CAUSE_AN) { + u32 ec = edma_err_cause & + ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); + sata_async_notification(ap); + if (!ec) + return; /* Just an AN; no need for the nukes */ + ata_ehi_push_desc(ehi, "SDB notify"); + } + } + /* + * All generations share these EDMA error cause bits: + */ + if (edma_err_cause & EDMA_ERR_DEV) { + err_mask |= AC_ERR_DEV; + action |= ATA_EH_RESET; + ata_ehi_push_desc(ehi, "dev error"); + } + if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | + EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR)) { + err_mask |= AC_ERR_ATA_BUS; + action |= ATA_EH_RESET; + ata_ehi_push_desc(ehi, "parity error"); + } + if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? + "dev disconnect" : "dev connect"); + action |= ATA_EH_RESET; + } + + /* + * Gen-I has a different SELF_DIS bit, + * different FREEZE bits, and no SERR bit: + */ + if (IS_GEN_I(hpriv)) { + eh_freeze_mask = EDMA_EH_FREEZE_5; + if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + ata_ehi_push_desc(ehi, "EDMA self-disable"); + } + } else { + eh_freeze_mask = EDMA_EH_FREEZE; + if (edma_err_cause & EDMA_ERR_SELF_DIS) { + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + ata_ehi_push_desc(ehi, "EDMA self-disable"); + } + if (edma_err_cause & EDMA_ERR_SERR) { + ata_ehi_push_desc(ehi, "SError=%08x", serr); + err_mask |= AC_ERR_ATA_BUS; + action |= ATA_EH_RESET; + } + } + + if (!err_mask) { + err_mask = AC_ERR_OTHER; + action |= ATA_EH_RESET; + } + + ehi->serror |= serr; + ehi->action |= action; + + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + if (err_mask == AC_ERR_DEV) { + /* + * Cannot do ata_port_freeze() here, + * because it would kill PIO access, + * which is needed for further diagnosis. 
+		 */
+		mv_eh_freeze(ap);
+		abort = 1;
+	} else if (edma_err_cause & eh_freeze_mask) {
+		/*
+		 * Note to self: ata_port_freeze() calls ata_port_abort()
+		 */
+		ata_port_freeze(ap);
+	} else {
+		abort = 1;
+	}
+
+	if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
+}
+
+static bool mv_process_crpb_response(struct ata_port *ap,
+		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
+{
+	u8 ata_status;
+	u16 edma_status = le16_to_cpu(response->flags);
+
+	/*
+	 * edma_status from a response queue entry:
+	 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
+	 * MSB is saved ATA status from command completion.
+	 */
+	if (!ncq_enabled) {
+		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
+		if (err_cause) {
+			/*
+			 * Error will be seen/handled by
+			 * mv_err_intr(). So do nothing at all here.
+			 */
+			return false;
+		}
+	}
+	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
+	if (!ac_err_mask(ata_status))
+		return true;
+	/* else: leave it for mv_err_intr() */
+	return false;
+}
+
+static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	u32 in_index;
+	bool work_done = false;
+	u32 done_mask = 0;
+	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
+
+	/* Get the hardware queue position index */
+	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
+			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
-	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+	/* Process new responses since the last time we looked */
+	while (in_index != pp->resp_idx) {
+		unsigned int tag;
+		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
-	if (EDMA_ERR_SERR & edma_err_cause) {
-		sata_scr_read(ap, SCR_ERROR, &serr);
-		sata_scr_write_flush(ap, SCR_ERROR, serr);
+		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+
+		if (IS_GEN_I(hpriv)) {
+			/* 50xx: no NCQ, only one command active at a time */
+			tag = ap->link.active_tag;
+		} else {
+			/* Gen II/IIE: get command tag from CRPB entry */
+			tag = le16_to_cpu(response->id) & 0x1f;
+		}
+		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
+			done_mask |= 1 << tag;
+		work_done = true;
 	}
-	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
-		struct mv_port_priv *pp = ap->private_data;
-		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+
+	if (work_done) {
+		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+
+		/* Update the software queue position index in hardware */
+		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
+			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
+			 port_mmio + EDMA_RSP_Q_OUT_PTR);
 	}
-	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
-		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
+}
-	/* Clear EDMA now that SERR cleanup done */
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+static void mv_port_intr(struct ata_port *ap, u32 port_cause)
+{
+	struct mv_port_priv *pp;
+	int edma_was_enabled;
-	/* check for fatal here and recover if needed */
-	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
-		mv_stop_and_reset(ap);
+	/*
+	 * Grab a snapshot of the EDMA_EN flag setting,
+	 * so that we have a consistent view for this port,
+	 * even if one of the routines we call changes it.
+	 */
+	pp = ap->private_data;
+	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+	/*
+	 * Process completed CRPB response(s) before other events.
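
[Editor's note: illustrative sketch, not part of this patch. mv_process_crpb_entries() above drains the response ring by advancing a software consumer index toward the hardware producer index, accumulating a per-tag completion mask along the way. The wrap-around index arithmetic, runnable standalone; a queue depth of 32 is assumed here, consistent with the 0x1f tag mask above.]

#include <stdio.h>

#define Q_DEPTH_MASK 0x1f	/* MV_MAX_Q_DEPTH - 1 */

int main(void)
{
	unsigned int in_index = 2;	/* producer: read from hardware */
	unsigned int resp_idx = 30;	/* consumer: kept in software */
	unsigned int done_mask = 0;

	while (in_index != resp_idx) {
		unsigned int tag = resp_idx;	/* stands in for the CRPB id field */
		done_mask |= 1u << tag;
		resp_idx = (resp_idx + 1) & Q_DEPTH_MASK;
	}
	printf("done_mask=0x%08x, resp_idx=%u\n", done_mask, resp_idx);
	/* tags 30, 31, 0 and 1 completed; the consumer has caught up at 2 */
	return 0;
}

The resulting mask is what the driver hands (XORed against qc_active) to ata_qc_complete_multiple().
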
+ */ + if (edma_was_enabled && (port_cause & DONE_IRQ)) { + mv_process_crpb_entries(ap, pp); + if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) + mv_handle_fbs_ncq_dev_err(ap); + } + /* + * Handle chip-reported errors, or continue on to handle PIO. + */ + if (unlikely(port_cause & ERR_IRQ)) { + mv_err_intr(ap); + } else if (!edma_was_enabled) { + struct ata_queued_cmd *qc = mv_get_active_qc(ap); + if (qc) + ata_bmdma_port_intr(ap, qc); + else + mv_unexpected_intr(ap, edma_was_enabled); + } } /** * mv_host_intr - Handle all interrupts on the given host controller * @host: host specific structure - * @relevant: port error bits relevant to this host controller - * @hc: which host controller we're to look at - * - * Read then write clear the HC interrupt status then walk each - * port connected to the HC and see if it needs servicing. Port - * success ints are reported in the HC interrupt status reg, the - * port error ints are reported in the higher level main - * interrupt status register and thus are passed in via the - * 'relevant' argument. + * @main_irq_cause: Main interrupt cause register for the chip. * * LOCKING: * Inherited from caller. */ -static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) +static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) { - void __iomem *mmio = host->mmio_base; - void __iomem *hc_mmio = mv_hc_base(mmio, hc); - struct ata_queued_cmd *qc; - u32 hc_irq_cause; - int shift, port, port0, hard_port, handled; - unsigned int err_mask; - - if (hc == 0) { - port0 = 0; - } else { - port0 = MV_PORTS_PER_HC; - } - - /* we'll need the HC success int register in most cases */ - hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); - if (hc_irq_cause) { - writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); - } + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base, *hc_mmio; + unsigned int handled = 0, port; - VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", - hc,relevant,hc_irq_cause); + /* If asserted, clear the "all ports" IRQ coalescing bit */ + if (main_irq_cause & ALL_PORTS_COAL_DONE) + writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); - for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { - u8 ata_status = 0; + for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *ap = host->ports[port]; - struct mv_port_priv *pp = ap->private_data; + unsigned int p, shift, hardport, port_cause; - hard_port = mv_hardport_from_port(port); /* range 0..3 */ - handled = 0; /* ensure ata_status is set if handled++ */ - - /* Note that DEV_IRQ might happen spuriously during EDMA, - * and should be ignored in such cases. - * The cause of this is still under investigation. + MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); + /* + * Each hc within the host has its own hc_irq_cause register, + * where the interrupting ports bits get ack'd. */ - if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { - /* EDMA: check for response queue interrupt */ - if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { - ata_status = mv_get_crpb_status(ap); - handled = 1; + if (hardport == 0) { /* first port on this hc ? 
*/ + u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; + u32 port_mask, ack_irqs; + /* + * Skip this entire hc if nothing pending for any ports + */ + if (!hc_cause) { + port += MV_PORTS_PER_HC - 1; + continue; } - } else { - /* PIO: check for device (drive) interrupt */ - if ((DEV_IRQ << hard_port) & hc_irq_cause) { - ata_status = readb((void __iomem *) - ap->ioaddr.status_addr); - handled = 1; - /* ignore spurious intr if drive still BUSY */ - if (ata_status & ATA_BUSY) { - ata_status = 0; - handled = 0; - } + /* + * We don't need/want to read the hc_irq_cause register, + * because doing so hurts performance, and + * main_irq_cause already gives us everything we need. + * + * But we do have to *write* to the hc_irq_cause to ack + * the ports that we are handling this time through. + * + * This requires that we create a bitmap for those + * ports which interrupted us, and use that bitmap + * to ack (only) those ports via hc_irq_cause. + */ + ack_irqs = 0; + if (hc_cause & PORTS_0_3_COAL_DONE) + ack_irqs = HC_COAL_IRQ; + for (p = 0; p < MV_PORTS_PER_HC; ++p) { + if ((port + p) >= hpriv->n_ports) + break; + port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); + if (hc_cause & port_mask) + ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; } - } - - if (ap && (ap->flags & ATA_FLAG_DISABLED)) - continue; - - err_mask = ac_err_mask(ata_status); - - shift = port << 1; /* (port * 2) */ - if (port >= MV_PORTS_PER_HC) { - shift++; /* skip bit 8 in the HC Main IRQ reg */ - } - if ((PORT0_ERR << shift) & relevant) { - mv_err_intr(ap, 1); - err_mask |= AC_ERR_OTHER; + hc_mmio = mv_hc_base_from_port(mmio, port); + writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); handled = 1; } + /* + * Handle interrupts signalled for this port: + */ + port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); + if (port_cause) + mv_port_intr(ap, port_cause); + } + return handled; +} - if (handled) { - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { - VPRINTK("port %u IRQ found for qc, " - "ata_status 0x%x\n", port,ata_status); - /* mark qc status appropriately */ - if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { - qc->err_mask |= err_mask; - ata_qc_complete(qc); - } - } +static int mv_pci_error(struct ata_host *host, void __iomem *mmio) +{ + struct mv_host_priv *hpriv = host->private_data; + struct ata_port *ap; + struct ata_queued_cmd *qc; + struct ata_eh_info *ehi; + unsigned int i, err_mask, printed = 0; + u32 err_cause; + + err_cause = readl(mmio + hpriv->irq_cause_offset); + + dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); + + DPRINTK("All regs @ PCI error\n"); + mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); + + writelfl(0, mmio + hpriv->irq_cause_offset); + + for (i = 0; i < host->n_ports; i++) { + ap = host->ports[i]; + if (!ata_link_offline(&ap->link)) { + ehi = &ap->link.eh_info; + ata_ehi_clear_desc(ehi); + if (!printed++) + ata_ehi_push_desc(ehi, + "PCI err cause 0x%08x", err_cause); + err_mask = AC_ERR_HOST_BUS; + ehi->action = ATA_EH_RESET; + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + ata_port_freeze(ap); } } - VPRINTK("EXIT\n"); + return 1; /* handled */ } /** - * mv_interrupt - + * mv_interrupt - Main interrupt event handler * @irq: unused * @dev_instance: private data; in this case the host structure - * @regs: unused * * Read the read only register to determine if any host * controllers have pending interrupts. 
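
[Editor's note: illustrative sketch, not part of this patch. The hc_cause-to-ack_irqs translation above maps per-port (DONE, ERR) bit pairs in the main cause register onto the per-HC cause register's layout. The same loop standalone; the DMA_IRQ/DEV_IRQ bit positions below are assumptions for the demo, not taken from this hunk.]

#include <stdio.h>

#define MV_PORTS_PER_HC	4
#define DONE_IRQ	(1 << 0)	/* 2-bit pair per port in main cause */
#define ERR_IRQ		(1 << 1)
#define DMA_IRQ		(1 << 0)	/* assumed hc-cause layout, demo only */
#define DEV_IRQ		(1 << 8)	/* assumed */

int main(void)
{
	unsigned int hc_cause = 0x5;	/* DONE pending on ports 0 and 1 */
	unsigned int ack_irqs = 0;
	int p;

	for (p = 0; p < MV_PORTS_PER_HC; p++) {
		unsigned int port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
		if (hc_cause & port_mask)
			ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
	}
	printf("ack_irqs=0x%08x\n", ack_irqs);	/* 0x00000303: ports 0 and 1 only */
	return 0;
}

Building the ack bitmap this way lets the handler write hc_irq_cause once, for exactly the ports it is servicing, without the costly read of that register.
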
If so, call lower level @@ -1447,68 +3002,42 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) * This routine holds the host lock while processing pending * interrupts. */ -static irqreturn_t mv_interrupt(int irq, void *dev_instance, - struct pt_regs *regs) +static irqreturn_t mv_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; - unsigned int hc, handled = 0, n_hcs; - void __iomem *mmio = host->mmio_base; - struct mv_host_priv *hpriv; - u32 irq_stat; - - irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); - - /* check the cases where we either have nothing pending or have read - * a bogus register value which can indicate HW removal or PCI fault - */ - if (!irq_stat || (0xffffffffU == irq_stat)) { - return IRQ_NONE; - } + struct mv_host_priv *hpriv = host->private_data; + unsigned int handled = 0; + int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; + u32 main_irq_cause, pending_irqs; - n_hcs = mv_get_hc_count(host->ports[0]->flags); spin_lock(&host->lock); - for (hc = 0; hc < n_hcs; hc++) { - u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); - if (relevant) { - mv_host_intr(host, relevant, hc); - handled++; - } - } + /* for MSI: block new interrupts while in here */ + if (using_msi) + mv_write_main_irq_mask(0, hpriv); - hpriv = host->private_data; - if (IS_60XX(hpriv)) { - /* deal with the interrupt coalescing bits */ - if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE); - } + main_irq_cause = readl(hpriv->main_irq_cause_addr); + pending_irqs = main_irq_cause & hpriv->main_irq_mask; + /* + * Deal with cases where we either have nothing pending, or have read + * a bogus register value which can indicate HW removal or PCI fault. 
+ */ + if (pending_irqs && main_irq_cause != 0xffffffffU) { + if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) + handled = mv_pci_error(host, hpriv->base); + else + handled = mv_host_intr(host, pending_irqs); } - if (PCI_ERR & irq_stat) { - printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", - readl(mmio + PCI_IRQ_CAUSE_OFS)); + /* for MSI: unmask; interrupt cause bits will retrigger now */ + if (using_msi) + mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); - DPRINTK("All regs @ PCI error\n"); - mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); - handled++; - } spin_unlock(&host->lock); return IRQ_RETVAL(handled); } -static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) -{ - void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); - unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; - - return hc_mmio + ofs; -} - static unsigned int mv5_scr_offset(unsigned int sc_reg_in) { unsigned int ofs; @@ -1526,34 +3055,40 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in) return ofs; } -static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) +static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) { - void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); + struct mv_host_priv *hpriv = link->ap->host->private_data; + void __iomem *mmio = hpriv->base; + void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); - if (ofs != 0xffffffffU) - return readl(mmio + ofs); - else - return (u32) ofs; + if (ofs != 0xffffffffU) { + *val = readl(addr + ofs); + return 0; + } else + return -EINVAL; } -static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) +static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) { - void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); + struct mv_host_priv *hpriv = link->ap->host->private_data; + void __iomem *mmio = hpriv->base; + void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); - if (ofs != 0xffffffffU) - writelfl(val, mmio + ofs); + if (ofs != 0xffffffffU) { + writelfl(val, addr + ofs); + return 0; + } else + return -EINVAL; } -static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) +static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) { - u8 rev_id; + struct pci_dev *pdev = to_pci_dev(host->dev); int early_5080; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); - - early_5080 = (pdev->device == 0x5080) && (rev_id == 0); + early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); if (!early_5080) { u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); @@ -1561,12 +3096,12 @@ static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); } - mv_reset_pci_bus(pdev, mmio); + mv_reset_pci_bus(host, mmio); } static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { - writel(0x0fcfffff, mmio + MV_FLASH_CTL); + writel(0x0fcfffff, mmio + FLASH_CTL); } static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, @@ -1585,7 +3120,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; - writel(0, mmio + MV_GPIO_PORT_CTL); + writel(0, mmio + GPIO_PORT_CTL); /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ @@ -1603,9 +3138,9 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, int fix_apm_sq = 
(hpriv->hp_flags & MV_HP_ERRATA_50XXB0); if (fix_apm_sq) { - tmp = readl(phy_mmio + MV5_LT_MODE); + tmp = readl(phy_mmio + MV5_LTMODE); tmp |= (1 << 19); - writel(tmp, phy_mmio + MV5_LT_MODE); + writel(tmp, phy_mmio + MV5_LTMODE); tmp = readl(phy_mmio + MV5_PHY_CTL); tmp &= ~0x3; @@ -1628,12 +3163,10 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, { void __iomem *port_mmio = mv_port_base(mmio, port); - writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); - - mv_channel_reset(hpriv, mmio, port); + mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ - writel(0x11f, port_mmio + EDMA_CFG_OFS); + writel(0x11f, port_mmio + EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ @@ -1685,8 +3218,9 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, #undef ZERO #define ZERO(reg) writel(0, mmio + (reg)) -static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) +static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) { + struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); @@ -1696,10 +3230,9 @@ static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) ZERO(MV_PCI_DISC_TIMER); ZERO(MV_PCI_MSI_TRIGGER); writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); - ZERO(HC_MAIN_IRQ_MASK_OFS); ZERO(MV_PCI_SERR_MASK); - ZERO(PCI_IRQ_CAUSE_OFS); - ZERO(PCI_IRQ_MASK_OFS); + ZERO(hpriv->irq_cause_offset); + ZERO(hpriv->irq_mask_offset); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); @@ -1713,10 +3246,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) mv5_reset_flash(hpriv, mmio); - tmp = readl(mmio + MV_GPIO_PORT_CTL); + tmp = readl(mmio + GPIO_PORT_CTL); tmp &= 0x3; tmp |= (1 << 5) | (1 << 6); - writel(tmp, mmio + MV_GPIO_PORT_CTL); + writel(tmp, mmio + GPIO_PORT_CTL); } /** @@ -1731,7 +3264,7 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { - void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; + void __iomem *reg = mmio + PCI_MAIN_CMD_STS; int i, rc = 0; u32 t; @@ -1744,9 +3277,8 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, for (i = 0; i < 1000; i++) { udelay(1); t = readl(reg); - if (PCI_MASTER_EMPTY & t) { + if (PCI_MASTER_EMPTY & t) break; - } } if (!(PCI_MASTER_EMPTY & t)) { printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); @@ -1790,7 +3322,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *port_mmio; u32 tmp; - tmp = readl(mmio + MV_RESET_CFG); + tmp = readl(mmio + RESET_CFG); if ((tmp & (1 << 0)) == 0) { hpriv->signal[idx].amps = 0x7 << 8; hpriv->signal[idx].pre = 0x1 << 5; @@ -1806,7 +3338,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { - writel(0x00000060, mmio + MV_GPIO_PORT_CTL); + writel(0x00000060, mmio + GPIO_PORT_CTL); } static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, @@ -1819,7 +3351,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); int fix_phy_mode4 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); - u32 m2, tmp; + u32 m2, m3; if (fix_phy_mode2) { m2 = readl(port_mmio + PHY_MODE2); @@ -1836,27 +3368,37 @@ static void mv6_phy_errata(struct 
mv_host_priv *hpriv, void __iomem *mmio, udelay(200); } - /* who knows what this magic does */ - tmp = readl(port_mmio + PHY_MODE3); - tmp &= ~0x7F800000; - tmp |= 0x2A800000; - writel(tmp, port_mmio + PHY_MODE3); - - if (fix_phy_mode4) { - u32 m4; - - m4 = readl(port_mmio + PHY_MODE4); - - if (hp_flags & MV_HP_ERRATA_60X1B2) - tmp = readl(port_mmio + 0x310); + /* + * Gen-II/IIe PHY_MODE3 errata RM#2: + * Achieves better receiver noise performance than the h/w default: + */ + m3 = readl(port_mmio + PHY_MODE3); + m3 = (m3 & 0x1f) | (0x5555601 << 5); - m4 = (m4 & ~(1 << 1)) | (1 << 0); + /* Guideline 88F5182 (GL# SATA-S11) */ + if (IS_SOC(hpriv)) + m3 &= ~0x1c; + if (fix_phy_mode4) { + u32 m4 = readl(port_mmio + PHY_MODE4); + /* + * Enforce reserved-bit restrictions on GenIIe devices only. + * For earlier chipsets, force only the internal config field + * (workaround for errata FEr SATA#10 part 1). + */ + if (IS_GEN_IIE(hpriv)) + m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; + else + m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; writel(m4, port_mmio + PHY_MODE4); - - if (hp_flags & MV_HP_ERRATA_60X1B2) - writel(tmp, port_mmio + 0x310); } + /* + * Workaround for 60x1-B2 errata SATA#13: + * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, + * so we must always rewrite PHY_MODE3 after PHY_MODE4. + * Or ensure we use writelfl() when writing PHY_MODE4. + */ + writel(m3, port_mmio + PHY_MODE3); /* Revert values of pre-emphasis and signal amps to the saved ones */ m2 = readl(port_mmio + PHY_MODE2); @@ -1875,191 +3417,271 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, writel(m2, port_mmio + PHY_MODE2); } -static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, - unsigned int port_no) +/* TODO: use the generic LED interface to configure the SATA Presence */ +/* & Activity LEDs on the board */ +static void mv_soc_enable_leds(struct mv_host_priv *hpriv, + void __iomem *mmio) { - void __iomem *port_mmio = mv_port_base(mmio, port_no); + return; +} - writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); +static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, + void __iomem *mmio) +{ + void __iomem *port_mmio; + u32 tmp; - if (IS_60XX(hpriv)) { - u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 7); /* enable gen2i speed */ - ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ - writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); - } + port_mmio = mv_port_base(mmio, idx); + tmp = readl(port_mmio + PHY_MODE2); - udelay(25); /* allow reset propagation */ + hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ + hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ +} - /* Spec never mentions clearing the bit. Marvell's driver does * clear the bit, however.
- */ - writelfl(0, port_mmio + EDMA_CMD_OFS); +#undef ZERO +#define ZERO(reg) writel(0, port_mmio + (reg)) +static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int port) +{ + void __iomem *port_mmio = mv_port_base(mmio, port); - hpriv->ops->phy_errata(hpriv, mmio, port_no); + mv_reset_channel(hpriv, mmio, port); + + ZERO(0x028); /* command */ + writel(0x101f, port_mmio + EDMA_CFG); + ZERO(0x004); /* timer */ + ZERO(0x008); /* irq err cause */ + ZERO(0x00c); /* irq err mask */ + ZERO(0x010); /* rq bah */ + ZERO(0x014); /* rq inp */ + ZERO(0x018); /* rq outp */ + ZERO(0x01c); /* respq bah */ + ZERO(0x024); /* respq outp */ + ZERO(0x020); /* respq inp */ + ZERO(0x02c); /* test control */ + writel(0x800, port_mmio + EDMA_IORDY_TMOUT); +} + +#undef ZERO + +#define ZERO(reg) writel(0, hc_mmio + (reg)) +static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, + void __iomem *mmio) +{ + void __iomem *hc_mmio = mv_hc_base(mmio, 0); + + ZERO(0x00c); + ZERO(0x010); + ZERO(0x014); - if (IS_50XX(hpriv)) - mdelay(1); } -static void mv_stop_and_reset(struct ata_port *ap) +#undef ZERO + +static int mv_soc_reset_hc(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int n_hc) { - struct mv_host_priv *hpriv = ap->host->private_data; - void __iomem *mmio = ap->host->mmio_base; + unsigned int port; - mv_stop_dma(ap); + for (port = 0; port < hpriv->n_ports; port++) + mv_soc_reset_hc_port(hpriv, mmio, port); - mv_channel_reset(hpriv, mmio, ap->port_no); + mv_soc_reset_one_hc(hpriv, mmio); - __mv_phy_reset(ap, 0); + return 0; } -static inline void __msleep(unsigned int msec, int can_sleep) +static void mv_soc_reset_flash(struct mv_host_priv *hpriv, + void __iomem *mmio) { - if (can_sleep) - msleep(msec); - else - mdelay(msec); + return; +} + +static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) +{ + return; +} + +static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, + void __iomem *mmio, unsigned int port) +{ + void __iomem *port_mmio = mv_port_base(mmio, port); + u32 reg; + + reg = readl(port_mmio + PHY_MODE3); + reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ + reg |= (0x1 << 27); + reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ + reg |= (0x1 << 29); + writel(reg, port_mmio + PHY_MODE3); + + reg = readl(port_mmio + PHY_MODE4); + reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ + reg |= (0x1 << 16); + writel(reg, port_mmio + PHY_MODE4); + + reg = readl(port_mmio + PHY_MODE9_GEN2); + reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ + reg |= 0x8; + reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ + writel(reg, port_mmio + PHY_MODE9_GEN2); + + reg = readl(port_mmio + PHY_MODE9_GEN1); + reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ + reg |= 0x8; + reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ + writel(reg, port_mmio + PHY_MODE9_GEN1); } /** - * __mv_phy_reset - Perform eDMA reset followed by COMRESET - * @ap: ATA channel to manipulate - * - * Part of this is taken from __sata_phy_reset and modified to - * not sleep since this routine gets called from interrupt level. + * soc_is_65n - check if the SoC is a 65 nm device * - * LOCKING: - * Inherited from caller. This is coded to safe to call at - * interrupt level, i.e. it does not sleep. + * Detect the SoC type by reading the PHYCFG_OFS register: the register + * exists only on the 65 nm devices and holds a non-zero value there, + * while reading it on older devices returns 0.
*/ -static void __mv_phy_reset(struct ata_port *ap, int can_sleep) +static bool soc_is_65n(struct mv_host_priv *hpriv) { - struct mv_port_priv *pp = ap->private_data; - struct mv_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = mv_ap_base(ap); - struct ata_taskfile tf; - struct ata_device *dev = &ap->device[0]; - unsigned long timeout; - int retry = 5; - u32 sstatus; + void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); - VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); - - DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), - mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); + if (readl(port0_mmio + PHYCFG_OFS)) + return true; + return false; +} - /* Issue COMRESET via SControl */ -comreset_retry: - sata_scr_write_flush(ap, SCR_CONTROL, 0x301); - __msleep(1, can_sleep); +static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) +{ + u32 ifcfg = readl(port_mmio + SATA_IFCFG); - sata_scr_write_flush(ap, SCR_CONTROL, 0x300); - __msleep(20, can_sleep); + ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ + if (want_gen2i) + ifcfg |= (1 << 7); /* enable gen2i speed */ + writelfl(ifcfg, port_mmio + SATA_IFCFG); +} - timeout = jiffies + msecs_to_jiffies(200); - do { - sata_scr_read(ap, SCR_STATUS, &sstatus); - if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) - break; +static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, + unsigned int port_no) +{ + void __iomem *port_mmio = mv_port_base(mmio, port_no); - __msleep(1, can_sleep); - } while (time_before(jiffies, timeout)); + /* + * The datasheet warns against setting EDMA_RESET when EDMA is active + * (but doesn't say what the problem might be). So we first try + * to disable the EDMA engine before doing the EDMA_RESET operation. + */ + mv_stop_edma_engine(port_mmio); + writelfl(EDMA_RESET, port_mmio + EDMA_CMD); - /* work around errata */ - if (IS_60XX(hpriv) && - (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) && - (retry-- > 0)) - goto comreset_retry; + if (!IS_GEN_I(hpriv)) { + /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ + mv_setup_ifcfg(port_mmio, 1); + } + /* + * Strobing EDMA_RESET here causes a hard reset of the SATA transport, + * link, and physical layers. It resets all SATA interface registers + * (except for SATA_IFCFG), and issues a COMRESET to the dev. + */ + writelfl(EDMA_RESET, port_mmio + EDMA_CMD); + udelay(25); /* allow reset propagation */ + writelfl(0, port_mmio + EDMA_CMD); - DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), - mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); + hpriv->ops->phy_errata(hpriv, mmio, port_no); - if (ata_port_online(ap)) { - ata_port_probe(ap); - } else { - sata_scr_read(ap, SCR_STATUS, &sstatus); - ata_port_printk(ap, KERN_INFO, - "no device found (phy stat %08x)\n", sstatus); - ata_port_disable(ap); - return; - } - ap->cbl = ATA_CBL_SATA; + if (IS_GEN_I(hpriv)) + mdelay(1); +} - /* even after SStatus reflects that device is ready, - * it seems to take a while for link to be fully - * established (and thus Status no longer 0x80/0x7F), - * so we poll a bit for that, here. 
- */ - retry = 20; - while (1) { - u8 drv_stat = ata_check_status(ap); - if ((drv_stat != 0x80) && (drv_stat != 0x7f)) - break; - __msleep(500, can_sleep); - if (retry-- <= 0) - break; +static void mv_pmp_select(struct ata_port *ap, int pmp) +{ + if (sata_pmp_supported(ap)) { + void __iomem *port_mmio = mv_ap_base(ap); + u32 reg = readl(port_mmio + SATA_IFCTL); + int old = reg & 0xf; + + if (old != pmp) { + reg = (reg & ~0xf) | pmp; + writelfl(reg, port_mmio + SATA_IFCTL); + } } +} - tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); - tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); - tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr); - tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); +static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + mv_pmp_select(link->ap, sata_srst_pmp(link)); + return sata_std_hardreset(link, class, deadline); +} - dev->class = ata_dev_classify(&tf); - if (!ata_dev_enabled(dev)) { - VPRINTK("Port disabled post-sig: No device present.\n"); - ata_port_disable(ap); - } +static int mv_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + mv_pmp_select(link->ap, sata_srst_pmp(link)); + return ata_sff_softreset(link, class, deadline); +} - writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); +static int mv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct mv_host_priv *hpriv = ap->host->private_data; + struct mv_port_priv *pp = ap->private_data; + void __iomem *mmio = hpriv->base; + int rc, attempts = 0, extra = 0; + u32 sstatus; + bool online; + mv_reset_channel(hpriv, mmio, ap->port_no); pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + pp->pp_flags &= + ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); - VPRINTK("EXIT\n"); + /* Workaround for errata FEr SATA#10 (part 2) */ + do { + const unsigned long *timing = + sata_ehc_deb_timing(&link->eh_context); + + rc = sata_link_hardreset(link, timing, deadline + extra, + &online, NULL); + rc = online ? -EAGAIN : rc; + if (rc) + return rc; + sata_scr_read(link, SCR_STATUS, &sstatus); + if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { + /* Force 1.5gb/s link speed and try again */ + mv_setup_ifcfg(mv_ap_base(ap), 0); + if (time_after(jiffies + HZ, deadline)) + extra = HZ; /* only extend it once, max */ + } + } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); + mv_save_cached_regs(ap); + mv_edma_cfg(ap, 0, 0); + + return rc; } -static void mv_phy_reset(struct ata_port *ap) +static void mv_eh_freeze(struct ata_port *ap) { - __mv_phy_reset(ap, 1); + mv_stop_edma(ap); + mv_enable_port_irqs(ap, 0); } -/** - * mv_eng_timeout - Routine called by libata when SCSI times out I/O - * @ap: ATA channel to manipulate - * - * Intent is to clear all pending error conditions, reset the - * chip/bus, fail the command, and move on. - * - * LOCKING: - * This routine holds the host lock while failing the command. 
- */ -static void mv_eng_timeout(struct ata_port *ap) +static void mv_eh_thaw(struct ata_port *ap) { - struct ata_queued_cmd *qc; - unsigned long flags; - - ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); - DPRINTK("All regs @ start of eng_timeout\n"); - mv_dump_all_regs(ap->host->mmio_base, ap->port_no, - to_pci_dev(ap->host->dev)); + struct mv_host_priv *hpriv = ap->host->private_data; + unsigned int port = ap->port_no; + unsigned int hardport = mv_hardport_from_port(port); + void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); + void __iomem *port_mmio = mv_ap_base(ap); + u32 hc_irq_cause; - qc = ata_qc_from_tag(ap, ap->active_tag); - printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", - ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); + /* clear EDMA errors on this port */ + writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); - spin_lock_irqsave(&ap->host->lock, flags); - mv_err_intr(ap, 0); - mv_stop_and_reset(ap); - spin_unlock_irqrestore(&ap->host->lock, flags); + /* clear pending irq events */ + hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); + writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); - if (qc->flags & ATA_QCFLAG_ACTIVE) { - qc->err_mask |= AC_ERR_TIMEOUT; - ata_eh_qc_complete(qc); - } + mv_enable_port_irqs(ap, ERR_IRQ); } /** @@ -2076,8 +3698,7 @@ static void mv_eng_timeout(struct ata_port *ap) */ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) { - unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; - unsigned serr_ofs; + void __iomem *serr, *shd_base = port_mmio + SHD_BLK; /* PIO related setup */ @@ -2092,39 +3713,74 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) port->status_addr = port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); /* special case: control/altstatus doesn't have ATA_REG_ address */ - port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; - - /* unused: */ - port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; + port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; /* Clear any currently outstanding port interrupt conditions */ - serr_ofs = mv_scr_offset(SCR_ERROR); - writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); - writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + serr = port_mmio + mv_scr_offset(SCR_ERROR); + writelfl(readl(serr), serr); + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); - /* unmask all EDMA error interrupts */ - writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); + /* unmask all non-transient EDMA error interrupts */ + writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", - readl(port_mmio + EDMA_CFG_OFS), - readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), - readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); + readl(port_mmio + EDMA_CFG), + readl(port_mmio + EDMA_ERR_IRQ_CAUSE), + readl(port_mmio + EDMA_ERR_IRQ_MASK)); } -static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, - unsigned int board_idx) +static unsigned int mv_in_pcix_mode(struct ata_host *host) { - u8 rev_id; - u32 hp_flags = hpriv->hp_flags; + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base; + u32 reg; + + if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) + return 0; /* not PCI-X capable */ + reg = readl(mmio + MV_PCI_MODE); + if ((reg & MV_PCI_MODE_MASK) == 0) + return 0; /* conventional PCI mode */ + return 1; /* chip is in PCI-X mode */ +} + +static int 
mv_pci_cut_through_okay(struct ata_host *host) +{ + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base; + u32 reg; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + if (!mv_in_pcix_mode(host)) { + reg = readl(mmio + MV_PCI_COMMAND); + if (reg & MV_PCI_COMMAND_MRDTRIG) + return 0; /* not okay */ + } + return 1; /* okay */ +} + +static void mv_60x1b2_errata_pci7(struct ata_host *host) +{ + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base; + + /* workaround for 60x1-B2 errata PCI#7 */ + if (mv_in_pcix_mode(host)) { + u32 reg = readl(mmio + MV_PCI_COMMAND); + writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); + } +} - switch(board_idx) { +static int mv_chip_id(struct ata_host *host, unsigned int board_idx) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct mv_host_priv *hpriv = host->private_data; + u32 hp_flags = hpriv->hp_flags; + + switch (board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; - hp_flags |= MV_HP_50XX; + hp_flags |= MV_HP_GEN_I; - switch (rev_id) { + switch (pdev->revision) { case 0x1: hp_flags |= MV_HP_ERRATA_50XXB0; break; @@ -2132,8 +3788,8 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, hp_flags |= MV_HP_ERRATA_50XXB2; break; default: - dev_printk(KERN_WARNING, &pdev->dev, - "Applying 50XXB2 workarounds to unknown rev\n"); + dev_warn(&pdev->dev, + "Applying 50XXB2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } @@ -2142,9 +3798,9 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, case chip_504x: case chip_508x: hpriv->ops = &mv5xxx_ops; - hp_flags |= MV_HP_50XX; + hp_flags |= MV_HP_GEN_I; - switch (rev_id) { + switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_50XXB0; break; @@ -2152,8 +3808,8 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, hp_flags |= MV_HP_ERRATA_50XXB2; break; default: - dev_printk(KERN_WARNING, &pdev->dev, - "Applying B2 workarounds to unknown rev\n"); + dev_warn(&pdev->dev, + "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } @@ -2162,57 +3818,104 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, case chip_604x: case chip_608x: hpriv->ops = &mv6xxx_ops; + hp_flags |= MV_HP_GEN_II; - switch (rev_id) { + switch (pdev->revision) { case 0x7: + mv_60x1b2_errata_pci7(host); hp_flags |= MV_HP_ERRATA_60X1B2; break; case 0x9: hp_flags |= MV_HP_ERRATA_60X1C0; break; default: - dev_printk(KERN_WARNING, &pdev->dev, - "Applying B2 workarounds to unknown rev\n"); + dev_warn(&pdev->dev, + "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1B2; break; } break; case chip_7042: + hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; + if (pdev->vendor == PCI_VENDOR_ID_TTI && + (pdev->device == 0x2300 || pdev->device == 0x2310)) + { + /* + * Highpoint RocketRAID PCIe 23xx series cards: + * + * Unconfigured drives are treated as "Legacy" + * by the BIOS, and it overwrites sector 8 with + * a "Lgcy" metadata block prior to Linux boot. + * + * Configured drives (RAID or JBOD) leave sector 8 + * alone, but instead overwrite a high numbered + * sector for the RAID metadata. This sector can + * be determined exactly, by truncating the physical + * drive capacity to a nice even GB value. + * + * RAID metadata is at: (dev->n_sectors & ~0xfffff) + * + * Warn the user, lest they think we're just buggy. 
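
For the capacity truncation mentioned in that comment: 0x100000 sectors is 512 MiB, so the RAID metadata sector is found by masking off the low 20 bits of the drive's sector count. A worked illustration, assuming a 160 GB disk reporting 312581808 sectors (the helper name is hypothetical):

#include <linux/types.h>

/* Illustration only: where the RocketRAID BIOS puts RAID metadata.
 * Truncate the sector count to a 0x100000-sector (512 MiB) boundary,
 * i.e. (dev->n_sectors & ~0xfffff) as the comment above states.
 */
static u64 example_rocketraid_metadata_sector(u64 n_sectors)
{
	return n_sectors & ~0xfffffULL;
}

/* e.g. example_rocketraid_metadata_sector(312581808) == 312475648 */
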
+ */ + printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" + " BIOS CORRUPTS DATA on all attached drives," + " regardless of if/how they are configured." + " BEWARE!\n"); + printk(KERN_WARNING DRV_NAME ": For data safety, do not" + " use sectors 8-9 on \"Legacy\" drives," + " and avoid the final two gigabytes on" + " all RocketRAID BIOS initialized drives.\n"); + } + /* drop through */ case chip_6042: hpriv->ops = &mv6xxx_ops; - hp_flags |= MV_HP_GEN_IIE; + if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) + hp_flags |= MV_HP_CUT_THROUGH; - switch (rev_id) { - case 0x0: - hp_flags |= MV_HP_ERRATA_XX42A0; - break; - case 0x1: + switch (pdev->revision) { + case 0x2: /* Rev.B0: the first/only public release */ hp_flags |= MV_HP_ERRATA_60X1C0; break; default: - dev_printk(KERN_WARNING, &pdev->dev, - "Applying 60X1C0 workarounds to unknown rev\n"); + dev_warn(&pdev->dev, + "Applying 60X1C0 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1C0; break; } break; + case chip_soc: + if (soc_is_65n(hpriv)) + hpriv->ops = &mv_soc_65n_ops; + else + hpriv->ops = &mv_soc_ops; + hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | + MV_HP_ERRATA_60X1C0; + break; default: - printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); + dev_err(host->dev, "BUG: invalid board index %u\n", board_idx); return 1; } hpriv->hp_flags = hp_flags; + if (hp_flags & MV_HP_PCIE) { + hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; + hpriv->irq_mask_offset = PCIE_IRQ_MASK; + hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; + } else { + hpriv->irq_cause_offset = PCI_IRQ_CAUSE; + hpriv->irq_mask_offset = PCI_IRQ_MASK; + hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; + } return 0; } /** * mv_init_host - Perform some early initialization of the host. - * @pdev: host PCI device - * @probe_ent: early data struct representing the host + * @host: ATA host to initialize * * If possible, do an early global reset of the host. Then do * our port init and clear/unmask all/relevant host interrupts. @@ -2220,50 +3923,49 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, * LOCKING: * Inherited from caller. 
*/ -static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, - unsigned int board_idx) +static int mv_init_host(struct ata_host *host) { int rc = 0, n_hc, port, hc; - void __iomem *mmio = probe_ent->mmio_base; - struct mv_host_priv *hpriv = probe_ent->private_data; - - /* global interrupt mask */ - writel(0, mmio + HC_MAIN_IRQ_MASK_OFS); + struct mv_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->base; - rc = mv_chip_id(pdev, hpriv, board_idx); + rc = mv_chip_id(host, hpriv->board_idx); if (rc) goto done; - n_hc = mv_get_hc_count(probe_ent->port_flags); - probe_ent->n_ports = MV_PORTS_PER_HC * n_hc; + if (IS_SOC(hpriv)) { + hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; + hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; + } else { + hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; + hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; + } + + /* initialize shadow irq mask with register's value */ + hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); + + /* global interrupt mask: 0 == mask everything */ + mv_set_main_irq_mask(host, ~0, 0); - for (port = 0; port < probe_ent->n_ports; port++) - hpriv->ops->read_preamp(hpriv, port, mmio); + n_hc = mv_get_hc_count(host->ports[0]->flags); + + for (port = 0; port < host->n_ports; port++) + if (hpriv->ops->read_preamp) + hpriv->ops->read_preamp(hpriv, port, mmio); rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); if (rc) goto done; hpriv->ops->reset_flash(hpriv, mmio); - hpriv->ops->reset_bus(pdev, mmio); + hpriv->ops->reset_bus(host, mmio); hpriv->ops->enable_leds(hpriv, mmio); - for (port = 0; port < probe_ent->n_ports; port++) { - if (IS_60XX(hpriv)) { - void __iomem *port_mmio = mv_port_base(mmio, port); - - u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 7); /* enable gen2i speed */ - ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ - writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); - } - - hpriv->ops->phy_errata(hpriv, mmio, port); - } - - for (port = 0; port < probe_ent->n_ports; port++) { + for (port = 0; port < host->n_ports; port++) { + struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(mmio, port); - mv_port_init(&probe_ent->port[port], port_mmio); + + mv_port_init(&ap->ioaddr, port_mmio); } for (hc = 0; hc < n_hc; hc++) { @@ -2271,186 +3973,535 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " "(before clear)=0x%08x\n", hc, - readl(hc_mmio + HC_CFG_OFS), - readl(hc_mmio + HC_IRQ_CAUSE_OFS)); + readl(hc_mmio + HC_CFG), + readl(hc_mmio + HC_IRQ_CAUSE)); /* Clear any currently outstanding hc interrupt conditions */ - writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); + writelfl(0, hc_mmio + HC_IRQ_CAUSE); } - /* Clear any currently outstanding host interrupt conditions */ - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + if (!IS_SOC(hpriv)) { + /* Clear any currently outstanding host interrupt conditions */ + writelfl(0, mmio + hpriv->irq_cause_offset); - /* and unmask interrupt generation for host regs */ - writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); - writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); - - VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " - "PCI int cause/mask=0x%08x/0x%08x\n", - readl(mmio + HC_MAIN_IRQ_CAUSE_OFS), - readl(mmio + HC_MAIN_IRQ_MASK_OFS), - readl(mmio + PCI_IRQ_CAUSE_OFS), - readl(mmio + PCI_IRQ_MASK_OFS)); + /* and unmask interrupt generation for host regs */ + 
writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); + } + /* + * enable only global host interrupts for now. + * The per-port interrupts get done later as ports are set up. + */ + mv_set_main_irq_mask(host, 0, PCI_ERR); + mv_set_irq_coalescing(host, irq_coalescing_io_count, + irq_coalescing_usecs); done: return rc; } +static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) +{ + hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, + MV_CRQB_Q_SZ, 0); + if (!hpriv->crqb_pool) + return -ENOMEM; + + hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, + MV_CRPB_Q_SZ, 0); + if (!hpriv->crpb_pool) + return -ENOMEM; + + hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, + MV_SG_TBL_SZ, 0); + if (!hpriv->sg_tbl_pool) + return -ENOMEM; + + return 0; +} + +static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, + const struct mbus_dram_target_info *dram) +{ + int i; + + for (i = 0; i < 4; i++) { + writel(0, hpriv->base + WINDOW_CTRL(i)); + writel(0, hpriv->base + WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + hpriv->base + WINDOW_CTRL(i)); + writel(cs->base, hpriv->base + WINDOW_BASE(i)); + } +} + +/** + * mv_platform_probe - handle a positive probe of an soc Marvell + * host + * @pdev: platform device found + * + * LOCKING: + * Inherited from caller. + */ +static int mv_platform_probe(struct platform_device *pdev) +{ + const struct mv_sata_platform_data *mv_platform_data; + const struct mbus_dram_target_info *dram; + const struct ata_port_info *ppi[] = + { &mv_port_info[chip_soc], NULL }; + struct ata_host *host; + struct mv_host_priv *hpriv; + struct resource *res; + int n_ports = 0, irq = 0; + int rc; + int port; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + /* + * Simple resource validation .. 
+ */ + if (unlikely(pdev->num_resources != 2)) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + /* + * Get the register base first + */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -EINVAL; + + /* allocate host */ + if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + } else { + mv_platform_data = dev_get_platdata(&pdev->dev); + n_ports = mv_platform_data->n_ports; + irq = platform_get_irq(pdev, 0); + } + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + + if (!host || !hpriv) + return -ENOMEM; + hpriv->port_clks = devm_kzalloc(&pdev->dev, + sizeof(struct clk *) * n_ports, + GFP_KERNEL); + if (!hpriv->port_clks) + return -ENOMEM; + hpriv->port_phys = devm_kzalloc(&pdev->dev, + sizeof(struct phy *) * n_ports, + GFP_KERNEL); + if (!hpriv->port_phys) + return -ENOMEM; + host->private_data = hpriv; + hpriv->board_idx = chip_soc; + + host->iomap = NULL; + hpriv->base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + hpriv->base -= SATAHC0_REG_BASE; + + hpriv->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(hpriv->clk)) + dev_notice(&pdev->dev, "cannot get optional clkdev\n"); + else + clk_prepare_enable(hpriv->clk); + + for (port = 0; port < n_ports; port++) { + char port_number[16]; + sprintf(port_number, "%d", port); + hpriv->port_clks[port] = clk_get(&pdev->dev, port_number); + if (!IS_ERR(hpriv->port_clks[port])) + clk_prepare_enable(hpriv->port_clks[port]); + + sprintf(port_number, "port%d", port); + hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev, + port_number); + if (IS_ERR(hpriv->port_phys[port])) { + rc = PTR_ERR(hpriv->port_phys[port]); + hpriv->port_phys[port] = NULL; + if (rc != -EPROBE_DEFER) + dev_warn(&pdev->dev, "error getting phy %d", rc); + + /* Cleanup only the initialized ports */ + hpriv->n_ports = port; + goto err; + } else + phy_power_on(hpriv->port_phys[port]); + } + + /* All the ports have been initialized */ + hpriv->n_ports = n_ports; + + /* + * (Re-)program MBUS remapping windows if we are asked to. + */ + dram = mv_mbus_dram_info(); + if (dram) + mv_conf_mbus_windows(hpriv, dram); + + rc = mv_create_dma_pools(hpriv, &pdev->dev); + if (rc) + goto err; + + /* + * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be + * updated in the LP_PHY_CTL register. + */ + if (pdev->dev.of_node && + of_device_is_compatible(pdev->dev.of_node, + "marvell,armada-370-sata")) + hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL; + + /* initialize adapter */ + rc = mv_init_host(host); + if (rc) + goto err; + + dev_info(&pdev->dev, "slots %u ports %d\n", + (unsigned)MV_MAX_Q_DEPTH, host->n_ports); + + rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht); + if (!rc) + return 0; + +err: + if (!IS_ERR(hpriv->clk)) { + clk_disable_unprepare(hpriv->clk); + clk_put(hpriv->clk); + } + for (port = 0; port < hpriv->n_ports; port++) { + if (!IS_ERR(hpriv->port_clks[port])) { + clk_disable_unprepare(hpriv->port_clks[port]); + clk_put(hpriv->port_clks[port]); + } + if (hpriv->port_phys[port]) + phy_power_off(hpriv->port_phys[port]); + } + + return rc; +} + +/* + * + * mv_platform_remove - unplug a platform interface + * @pdev: platform device + * + * A platform bus SATA device has been unplugged. Perform the needed + * cleanup. Also called on module unload for any active devices. 
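
mv_conf_mbus_windows() earlier in this hunk packs each DRAM chip-select window into a single control word. A sketch of that encoding, with the field layout read directly off the code above (the helper name is hypothetical):

#include <linux/types.h>

static u32 example_mbus_window_ctrl(u32 size, u8 mbus_attr, u8 target_id)
{
	return ((size - 1) & 0xffff0000) |	/* window size, 64 KiB units */
	       (mbus_attr << 8) |		/* chip-select attribute */
	       (target_id << 4) |		/* DRAM target ID */
	       1;				/* window enable bit */
}

/* e.g. a 256 MiB window, attribute 0xe, target 0:
 * example_mbus_window_ctrl(0x10000000, 0xe, 0) == 0x0fff0e01
 */
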
+ */ +static int mv_platform_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct mv_host_priv *hpriv = host->private_data; + int port; + ata_host_detach(host); + + if (!IS_ERR(hpriv->clk)) { + clk_disable_unprepare(hpriv->clk); + clk_put(hpriv->clk); + } + for (port = 0; port < host->n_ports; port++) { + if (!IS_ERR(hpriv->port_clks[port])) { + clk_disable_unprepare(hpriv->port_clks[port]); + clk_put(hpriv->port_clks[port]); + } + if (hpriv->port_phys[port]) + phy_power_off(hpriv->port_phys[port]); + } + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ata_host *host = platform_get_drvdata(pdev); + if (host) + return ata_host_suspend(host, state); + else + return 0; +} + +static int mv_platform_resume(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + const struct mbus_dram_target_info *dram; + int ret; + + if (host) { + struct mv_host_priv *hpriv = host->private_data; + + /* + * (Re-)program MBUS remapping windows if we are asked to. + */ + dram = mv_mbus_dram_info(); + if (dram) + mv_conf_mbus_windows(hpriv, dram); + + /* initialize adapter */ + ret = mv_init_host(host); + if (ret) { + printk(KERN_ERR DRV_NAME ": Error during HW init\n"); + return ret; + } + ata_host_resume(host); + } + + return 0; +} +#else +#define mv_platform_suspend NULL +#define mv_platform_resume NULL +#endif + +#ifdef CONFIG_OF +static struct of_device_id mv_sata_dt_ids[] = { + { .compatible = "marvell,armada-370-sata", }, + { .compatible = "marvell,orion-sata", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_sata_dt_ids); +#endif + +static struct platform_driver mv_platform_driver = { + .probe = mv_platform_probe, + .remove = mv_platform_remove, + .suspend = mv_platform_suspend, + .resume = mv_platform_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mv_sata_dt_ids), + }, +}; + + +#ifdef CONFIG_PCI +static int mv_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); +#ifdef CONFIG_PM_SLEEP +static int mv_pci_device_resume(struct pci_dev *pdev); +#endif + + +static struct pci_driver mv_pci_driver = { + .name = DRV_NAME, + .id_table = mv_pci_tbl, + .probe = mv_pci_init_one, + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = mv_pci_device_resume, +#endif + +}; + +/* move to PCI layer or libata core? */ +static int pci_go_64(struct pci_dev *pdev) +{ + int rc; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + + return rc; +} + /** * mv_print_info - Dump key info to kernel log for perusal. - * @probe_ent: early data struct representing the host + * @host: ATA host to print info about * * FIXME: complete this. * * LOCKING: * Inherited from caller. 
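
pci_go_64() above implements the usual try-64-bit-then-fall-back-to-32-bit DMA mask policy with the older pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. On kernels that provide dma_set_mask_and_coherent() (an assumption; that helper arrived later than some of the code in this patch), the same policy collapses to a few lines:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* prefer 64-bit DMA; fall back to 32-bit for both the
	 * streaming and coherent masks in one call */
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}
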
*/ -static void mv_print_info(struct ata_probe_ent *probe_ent) +static void mv_print_info(struct ata_host *host) { - struct pci_dev *pdev = to_pci_dev(probe_ent->dev); - struct mv_host_priv *hpriv = probe_ent->private_data; - u8 rev_id, scc; - const char *scc_s; + struct pci_dev *pdev = to_pci_dev(host->dev); + struct mv_host_priv *hpriv = host->private_data; + u8 scc; + const char *scc_s, *gen; /* Use this to determine the HW stepping of the chip so we know * what errata to workaround */ - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); - pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); if (scc == 0) scc_s = "SCSI"; else if (scc == 0x01) scc_s = "RAID"; else - scc_s = "unknown"; + scc_s = "?"; + + if (IS_GEN_I(hpriv)) + gen = "I"; + else if (IS_GEN_II(hpriv)) + gen = "II"; + else if (IS_GEN_IIE(hpriv)) + gen = "IIE"; + else + gen = "?"; - dev_printk(KERN_INFO, &pdev->dev, - "%u slots %u ports %s mode IRQ via %s\n", - (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, - scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); + dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n", + gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, + scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); } /** - * mv_init_one - handle a positive probe of a Marvell host + * mv_pci_init_one - handle a positive probe of a PCI Marvell host * @pdev: PCI device found * @ent: PCI device ID entry for the matched host * * LOCKING: * Inherited from caller. */ -static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int mv_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - static int printed_version = 0; - struct ata_probe_ent *probe_ent = NULL; - struct mv_host_priv *hpriv; unsigned int board_idx = (unsigned int)ent->driver_data; - void __iomem *mmio_base; - int pci_dev_busy = 0, rc; + const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; + struct ata_host *host; + struct mv_host_priv *hpriv; + int n_ports, port, rc; - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); - if (rc) { - return rc; - } - pci_set_master(pdev); + /* allocate host */ + n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + if (!host || !hpriv) + return -ENOMEM; + host->private_data = hpriv; + hpriv->n_ports = n_ports; + hpriv->board_idx = board_idx; - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + /* acquire resources */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; - memset(probe_ent, 0, sizeof(*probe_ent)); - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); + rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + hpriv->base = host->iomap[MV_PRIMARY_BAR]; - mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; - } + rc = pci_go_64(pdev); + if (rc) + return rc; - hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) { - rc = -ENOMEM; - goto err_out_iounmap; - } - memset(hpriv, 0, sizeof(*hpriv)); + rc = 
mv_create_dma_pools(hpriv, &pdev->dev); + if (rc) + return rc; - probe_ent->sht = mv_port_info[board_idx].sht; - probe_ent->port_flags = mv_port_info[board_idx].flags; - probe_ent->pio_mask = mv_port_info[board_idx].pio_mask; - probe_ent->udma_mask = mv_port_info[board_idx].udma_mask; - probe_ent->port_ops = mv_port_info[board_idx].port_ops; + for (port = 0; port < host->n_ports; port++) { + struct ata_port *ap = host->ports[port]; + void __iomem *port_mmio = mv_port_base(hpriv->base, port); + unsigned int offset = port_mmio - hpriv->base; - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - probe_ent->private_data = hpriv; + ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); + } /* initialize adapter */ - rc = mv_init_host(pdev, probe_ent, board_idx); - if (rc) { - goto err_out_hpriv; - } + rc = mv_init_host(host); + if (rc) + return rc; - /* Enable interrupts */ - if (msi && pci_enable_msi(pdev) == 0) { + /* Enable message-signaled interrupts, if requested */ + if (msi && pci_enable_msi(pdev) == 0) hpriv->hp_flags |= MV_HP_FLAG_MSI; - } else { - pci_intx(pdev, 1); - } mv_dump_pci_cfg(pdev, 0x68); - mv_print_info(probe_ent); + mv_print_info(host); - if (ata_device_add(probe_ent) == 0) { - rc = -ENODEV; /* No devices discovered */ - goto err_out_dev_add; - } + pci_set_master(pdev); + pci_try_set_mwi(pdev); + return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, + IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); +} - kfree(probe_ent); - return 0; +#ifdef CONFIG_PM_SLEEP +static int mv_pci_device_resume(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int rc; -err_out_dev_add: - if (MV_HP_FLAG_MSI & hpriv->hp_flags) { - pci_disable_msi(pdev); - } else { - pci_intx(pdev, 0); - } -err_out_hpriv: - kfree(hpriv); -err_out_iounmap: - pci_iounmap(pdev, mmio_base); -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) { - pci_disable_device(pdev); - } + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; - return rc; + /* initialize adapter */ + rc = mv_init_host(host); + if (rc) + return rc; + + ata_host_resume(host); + + return 0; } +#endif +#endif static int __init mv_init(void) { - return pci_register_driver(&mv_pci_driver); + int rc = -ENODEV; +#ifdef CONFIG_PCI + rc = pci_register_driver(&mv_pci_driver); + if (rc < 0) + return rc; +#endif + rc = platform_driver_register(&mv_platform_driver); + +#ifdef CONFIG_PCI + if (rc < 0) + pci_unregister_driver(&mv_pci_driver); +#endif + return rc; } static void __exit mv_exit(void) { +#ifdef CONFIG_PCI pci_unregister_driver(&mv_pci_driver); +#endif + platform_driver_unregister(&mv_platform_driver); } MODULE_AUTHOR("Brett Russ"); @@ -2458,9 +4509,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mv_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_param(msi, int, 0444); -MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); +MODULE_ALIAS("platform:" DRV_NAME); module_init(mv_init); module_exit(mv_exit); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 8cd730fe5dd..cdf99fac139 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -29,27 +29,37 @@ * NV-specific details such as register offsets, SATA phy location, * hotplug info, etc.
* + * CK804/MCP04 controllers support an alternate programming interface + * similar to the ADMA specification (with some modifications). + * This allows the use of NCQ. Non-DMA-mapped ATA commands are still + * sent through the legacy interface. + * */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> #include <linux/libata.h> #define DRV_NAME "sata_nv" -#define DRV_VERSION "2.0" +#define DRV_VERSION "3.5" + +#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL enum { + NV_MMIO_BAR = 5, + NV_PORTS = 2, - NV_PIO_MASK = 0x1f, - NV_MWDMA_MASK = 0x07, - NV_UDMA_MASK = 0x7f, + NV_PIO_MASK = ATA_PIO4, + NV_MWDMA_MASK = ATA_MWDMA2, + NV_UDMA_MASK = ATA_UDMA6, NV_PORT0_SCR_REG_OFFSET = 0x00, NV_PORT1_SCR_REG_OFFSET = 0x40, @@ -78,208 +88,507 @@ enum { // For PCI config register 20 NV_MCP_SATA_CFG_20 = 0x50, NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04, + NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17), + NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16), + NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14), + NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12), + + NV_ADMA_MAX_CPBS = 32, + NV_ADMA_CPB_SZ = 128, + NV_ADMA_APRD_SZ = 16, + NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) / + NV_ADMA_APRD_SZ, + NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5, + NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ, + NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS * + (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ), + + /* BAR5 offset to ADMA general registers */ + NV_ADMA_GEN = 0x400, + NV_ADMA_GEN_CTL = 0x00, + NV_ADMA_NOTIFIER_CLEAR = 0x30, + + /* BAR5 offset to ADMA ports */ + NV_ADMA_PORT = 0x480, + + /* size of ADMA port register space */ + NV_ADMA_PORT_SIZE = 0x100, + + /* ADMA port registers */ + NV_ADMA_CTL = 0x40, + NV_ADMA_CPB_COUNT = 0x42, + NV_ADMA_NEXT_CPB_IDX = 0x43, + NV_ADMA_STAT = 0x44, + NV_ADMA_CPB_BASE_LOW = 0x48, + NV_ADMA_CPB_BASE_HIGH = 0x4C, + NV_ADMA_APPEND = 0x50, + NV_ADMA_NOTIFIER = 0x68, + NV_ADMA_NOTIFIER_ERROR = 0x6C, + + /* NV_ADMA_CTL register bits */ + NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0), + NV_ADMA_CTL_CHANNEL_RESET = (1 << 5), + NV_ADMA_CTL_GO = (1 << 7), + NV_ADMA_CTL_AIEN = (1 << 8), + NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11), + NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12), + + /* CPB response flag bits */ + NV_CPB_RESP_DONE = (1 << 0), + NV_CPB_RESP_ATA_ERR = (1 << 3), + NV_CPB_RESP_CMD_ERR = (1 << 4), + NV_CPB_RESP_CPB_ERR = (1 << 7), + + /* CPB control flag bits */ + NV_CPB_CTL_CPB_VALID = (1 << 0), + NV_CPB_CTL_QUEUE = (1 << 1), + NV_CPB_CTL_APRD_VALID = (1 << 2), + NV_CPB_CTL_IEN = (1 << 3), + NV_CPB_CTL_FPDMA = (1 << 4), + + /* APRD flags */ + NV_APRD_WRITE = (1 << 1), + NV_APRD_END = (1 << 2), + NV_APRD_CONT = (1 << 3), + + /* NV_ADMA_STAT flags */ + NV_ADMA_STAT_TIMEOUT = (1 << 0), + NV_ADMA_STAT_HOTUNPLUG = (1 << 1), + NV_ADMA_STAT_HOTPLUG = (1 << 2), + NV_ADMA_STAT_CPBERR = (1 << 4), + NV_ADMA_STAT_SERROR = (1 << 5), + NV_ADMA_STAT_CMD_COMPLETE = (1 << 6), + NV_ADMA_STAT_IDLE = (1 << 8), + NV_ADMA_STAT_LEGACY = (1 << 9), + NV_ADMA_STAT_STOPPED = (1 << 10), + NV_ADMA_STAT_DONE = (1 << 12), + NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | + NV_ADMA_STAT_TIMEOUT, + + /* port flags */ + NV_ADMA_PORT_REGISTER_MODE = (1 << 0), + NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1), + + /* MCP55 reg offset */ + NV_CTL_MCP55 = 0x400, + NV_INT_STATUS_MCP55 = 0x440, + NV_INT_ENABLE_MCP55 = 0x444, + NV_NCQ_REG_MCP55 = 
0x448, + + /* MCP55 */ + NV_INT_ALL_MCP55 = 0xffff, + NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */ + NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd, + + /* SWNCQ ENABLE BITS*/ + NV_CTL_PRI_SWNCQ = 0x02, + NV_CTL_SEC_SWNCQ = 0x04, + + /* SW NCQ status bits*/ + NV_SWNCQ_IRQ_DEV = (1 << 0), + NV_SWNCQ_IRQ_PM = (1 << 1), + NV_SWNCQ_IRQ_ADDED = (1 << 2), + NV_SWNCQ_IRQ_REMOVED = (1 << 3), + + NV_SWNCQ_IRQ_BACKOUT = (1 << 4), + NV_SWNCQ_IRQ_SDBFIS = (1 << 5), + NV_SWNCQ_IRQ_DHREGFIS = (1 << 6), + NV_SWNCQ_IRQ_DMASETUP = (1 << 7), + + NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED | + NV_SWNCQ_IRQ_REMOVED, + }; -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static void nv_ck804_host_stop(struct ata_host *host); -static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, - struct pt_regs *regs); -static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, - struct pt_regs *regs); -static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, - struct pt_regs *regs); -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +/* ADMA Physical Region Descriptor - one SG segment */ +struct nv_adma_prd { + __le64 addr; + __le32 len; + u8 flags; + u8 packet_len; + __le16 reserved; +}; + +enum nv_adma_regbits { + CMDEND = (1 << 15), /* end of command list */ + WNB = (1 << 14), /* wait-not-BSY */ + IGN = (1 << 13), /* ignore this entry */ + CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */ + DA2 = (1 << (2 + 8)), + DA1 = (1 << (1 + 8)), + DA0 = (1 << (0 + 8)), +}; + +/* ADMA Command Parameter Block + The first 5 SG segments are stored inside the Command Parameter Block itself. + If there are more than 5 segments the remainder are stored in a separate + memory area indicated by next_aprd. 
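
To make the inline-versus-spill split concrete: when building a CPB, the first five scatter/gather elements use the embedded aprd[] array, later ones land in this tag's slice of the external APRD table, and next_aprd points at that slice only when it is needed. A simplified sketch of that layout using the structures defined here (an illustration, not the driver's exact fill routine; direction and END flag handling is omitted):

#include <linux/scatterlist.h>

static void example_fill_aprds(struct nv_adma_port_priv *pp,
			       struct nv_adma_cpb *cpb,
			       struct scatterlist *sgl,
			       unsigned int n_elem, unsigned int tag)
{
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(sgl, sg, n_elem, si) {
		/* first five descriptors live inside the CPB itself */
		struct nv_adma_prd *aprd = (si < 5) ?
			&cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * tag + (si - 5)];

		aprd->addr = cpu_to_le64(sg_dma_address(sg));
		aprd->len = cpu_to_le32(sg_dma_len(sg));
		aprd->flags = 0;	/* direction/END bits omitted */
	}
	if (n_elem > 5)
		cpb->next_aprd = cpu_to_le64(pp->aprd_dma +
					     NV_ADMA_SGTBL_SZ * tag);
	else
		cpb->next_aprd = cpu_to_le64(0);
}
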
*/ +struct nv_adma_cpb { + u8 resp_flags; /* 0 */ + u8 reserved1; /* 1 */ + u8 ctl_flags; /* 2 */ + /* len is length of taskfile in 64 bit words */ + u8 len; /* 3 */ + u8 tag; /* 4 */ + u8 next_cpb_idx; /* 5 */ + __le16 reserved2; /* 6-7 */ + __le16 tf[12]; /* 8-31 */ + struct nv_adma_prd aprd[5]; /* 32-111 */ + __le64 next_aprd; /* 112-119 */ + __le64 reserved3; /* 120-127 */ +}; + + +struct nv_adma_port_priv { + struct nv_adma_cpb *cpb; + dma_addr_t cpb_dma; + struct nv_adma_prd *aprd; + dma_addr_t aprd_dma; + void __iomem *ctl_block; + void __iomem *gen_block; + void __iomem *notifier_clear_block; + u64 adma_dma_mask; + u8 flags; + int last_issue_ncq; +}; + +struct nv_host_priv { + unsigned long type; +}; + +struct defer_queue { + u32 defer_bits; + unsigned int head; + unsigned int tail; + unsigned int tag[ATA_MAX_QUEUE]; +}; + +enum ncq_saw_flag_list { + ncq_saw_d2h = (1U << 0), + ncq_saw_dmas = (1U << 1), + ncq_saw_sdb = (1U << 2), + ncq_saw_backout = (1U << 3), +}; + +struct nv_swncq_port_priv { + struct ata_bmdma_prd *prd; /* our SG list */ + dma_addr_t prd_dma; /* and its DMA mapping */ + void __iomem *sactive_block; + void __iomem *irq_block; + void __iomem *tag_block; + u32 qc_active; + + unsigned int last_issue_tag; + + /* fifo circular queue to store deferral command */ + struct defer_queue defer_queue; + + /* for NCQ interrupt analysis */ + u32 dhfis_bits; + u32 dmafis_bits; + u32 sdbfis_bits; + + unsigned int ncq_flags; +}; + + +#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT))))) +static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +#ifdef CONFIG_PM_SLEEP +static int nv_pci_device_resume(struct pci_dev *pdev); +#endif +static void nv_ck804_host_stop(struct ata_host *host); +static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); +static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); +static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); +static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); + +static int nv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); -static void nv_error_handler(struct ata_port *ap); +static int nv_adma_slave_config(struct scsi_device *sdev); +static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); +static void nv_adma_qc_prep(struct ata_queued_cmd *qc); +static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); +static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); +static void nv_adma_irq_clear(struct ata_port *ap); +static int nv_adma_port_start(struct ata_port *ap); +static void nv_adma_port_stop(struct ata_port *ap); +#ifdef CONFIG_PM +static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg); +static int nv_adma_port_resume(struct ata_port *ap); +#endif +static void nv_adma_freeze(struct ata_port *ap); +static void nv_adma_thaw(struct ata_port *ap); +static void nv_adma_error_handler(struct ata_port *ap); +static void nv_adma_host_stop(struct ata_host *host); +static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc); +static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf); + +static void nv_mcp55_thaw(struct ata_port *ap); +static void nv_mcp55_freeze(struct ata_port 
*ap); +static void nv_swncq_error_handler(struct ata_port *ap); +static int nv_swncq_slave_config(struct scsi_device *sdev); +static int nv_swncq_port_start(struct ata_port *ap); +static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); +static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); +static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); +static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); +static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance); +#ifdef CONFIG_PM +static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg); +static int nv_swncq_port_resume(struct ata_port *ap); +#endif enum nv_host_type { GENERIC, NFORCE2, NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */ - CK804 + CK804, + ADMA, + MCP5x, + SWNCQ, }; static const struct pci_device_id nv_pci_tbl[] = { - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, - PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, - { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, - PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC }, - { 0, } /* terminate list */ + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x }, + { PCI_VDEVICE(NVIDIA, 
PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC }, + + { } /* terminate list */ }; static struct pci_driver nv_pci_driver = { .name = DRV_NAME, .id_table = nv_pci_tbl, .probe = nv_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = nv_pci_device_resume, +#endif .remove = ata_pci_remove_one, }; static struct scsi_host_template nv_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct scsi_host_template nv_adma_sht = { + ATA_NCQ_SHT(DRV_NAME), + .can_queue = NV_ADMA_MAX_CPBS, + .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, + .dma_boundary = NV_ADMA_DMA_BOUNDARY, + .slave_configure = nv_adma_slave_config, +}; + +static struct scsi_host_template nv_swncq_sht = { + ATA_NCQ_SHT(DRV_NAME), + .can_queue = ATA_MAX_QUEUE, .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, + .slave_configure = nv_swncq_slave_config, }; -static const struct ata_port_operations nv_generic_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .exec_command = ata_exec_command, - .check_status = ata_check_status, - .dev_select = ata_std_dev_select, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = nv_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .data_xfer = ata_pio_data_xfer, - .irq_handler = nv_generic_interrupt, - .irq_clear = ata_bmdma_irq_clear, +/* + * NV SATA controllers have various problems with the hardreset + * protocol depending on the specific controller and device. + * + * GENERIC: + * + * bko11195 reports that link doesn't come online after hardreset on + * generic nv's and there have been several other similar reports on + * linux-ide. + * + * bko12351#c23 reports that warmplug on MCP61 doesn't work with + * softreset. + * + * NF2/3: + * + * bko3352 reports nf2/3 controllers can't determine device signature + * reliably after hardreset. The following thread reports detection + * failure on cold boot with the standard debouncing timing. + * + * http://thread.gmane.org/gmane.linux.ide/34098 + * + * bko12176 reports that hardreset fails to bring up the link during + * boot on nf2. + * + * CK804: + * + * For initial probing after boot and hot plugging, hardreset mostly + * works fine on CK804 but curiously, reprobing on the initial port + * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg + * FIS in a somewhat nondeterministic way. 
+ * + * SWNCQ: + * + * bko12351 reports that when SWNCQ is enabled, for hotplug to work, + * hardreset should be used and hardreset can't report proper + * signature, which suggests that mcp5x is closer to nf2 as far as + * reset quirkiness is concerned. + * + * bko12703 reports that boot probing fails for Intel SSD with + * hardreset. Link fails to come online. Softreset works fine. + * + * The failures are varied but the following patterns seem true for + * all flavors. + * + * - Softreset during boot always works. + * + * - Hardreset during boot sometimes fails to bring up the link on + * certain combinations and device signature acquisition is + * unreliable. + * + * - Hardreset is often necessary after hotplug. + * + * So, preferring softreset for boot probing and error handling (as + * hardreset might bring down the link) but using hardreset for + * post-boot probing should work around the above issues in most + * cases. Define nv_hardreset() which only kicks in for post-boot + * probing and use it for all variants. + */ +static struct ata_port_operations nv_generic_ops = { + .inherits = &ata_bmdma_port_ops, + .lost_interrupt = ATA_OP_NULL, .scr_read = nv_scr_read, .scr_write = nv_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, + .hardreset = nv_hardreset, }; -static const struct ata_port_operations nv_nf2_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .exec_command = ata_exec_command, - .check_status = ata_check_status, - .dev_select = ata_std_dev_select, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +static struct ata_port_operations nv_nf2_ops = { + .inherits = &nv_generic_ops, .freeze = nv_nf2_freeze, .thaw = nv_nf2_thaw, - .error_handler = nv_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .data_xfer = ata_pio_data_xfer, - .irq_handler = nv_nf2_interrupt, - .irq_clear = ata_bmdma_irq_clear, - .scr_read = nv_scr_read, - .scr_write = nv_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, }; -static const struct ata_port_operations nv_ck804_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .exec_command = ata_exec_command, - .check_status = ata_check_status, - .dev_select = ata_std_dev_select, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, +static struct ata_port_operations nv_ck804_ops = { + .inherits = &nv_generic_ops, .freeze = nv_ck804_freeze, .thaw = nv_ck804_thaw, - .error_handler = nv_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .data_xfer = ata_pio_data_xfer, - .irq_handler = nv_ck804_interrupt, - .irq_clear = ata_bmdma_irq_clear, - .scr_read = nv_scr_read, - .scr_write = nv_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, .host_stop = nv_ck804_host_stop, }; -static struct ata_port_info nv_port_info[] = { +static struct ata_port_operations nv_adma_ops = { + .inherits = &nv_ck804_ops, + + .check_atapi_dma = nv_adma_check_atapi_dma, + .sff_tf_read = nv_adma_tf_read, + .qc_defer = ata_std_qc_defer, + .qc_prep = nv_adma_qc_prep, + .qc_issue = nv_adma_qc_issue, + .sff_irq_clear = nv_adma_irq_clear, + + .freeze 
= nv_adma_freeze, + .thaw = nv_adma_thaw, + .error_handler = nv_adma_error_handler, + .post_internal_cmd = nv_adma_post_internal_cmd, + + .port_start = nv_adma_port_start, + .port_stop = nv_adma_port_stop, +#ifdef CONFIG_PM + .port_suspend = nv_adma_port_suspend, + .port_resume = nv_adma_port_resume, +#endif + .host_stop = nv_adma_host_stop, +}; + +static struct ata_port_operations nv_swncq_ops = { + .inherits = &nv_generic_ops, + + .qc_defer = ata_std_qc_defer, + .qc_prep = nv_swncq_qc_prep, + .qc_issue = nv_swncq_qc_issue, + + .freeze = nv_mcp55_freeze, + .thaw = nv_mcp55_thaw, + .error_handler = nv_swncq_error_handler, + +#ifdef CONFIG_PM + .port_suspend = nv_swncq_port_suspend, + .port_resume = nv_swncq_port_resume, +#endif + .port_start = nv_swncq_port_start, +}; + +struct nv_pi_priv { + irq_handler_t irq_handler; + struct scsi_host_template *sht; +}; + +#define NV_PI_PRIV(_irq_handler, _sht) \ + &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht } + +static const struct ata_port_info nv_port_info[] = { /* generic */ { - .sht = &nv_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_generic_ops, + .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), }, /* nforce2/3 */ { - .sht = &nv_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_nf2_ops, + .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht), }, /* ck804 */ { - .sht = &nv_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_ck804_ops, + .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht), + }, + /* ADMA */ + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, + .pio_mask = NV_PIO_MASK, + .mwdma_mask = NV_MWDMA_MASK, + .udma_mask = NV_UDMA_MASK, + .port_ops = &nv_adma_ops, + .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht), + }, + /* MCP5x */ + { + .flags = ATA_FLAG_SATA, + .pio_mask = NV_PIO_MASK, + .mwdma_mask = NV_MWDMA_MASK, + .udma_mask = NV_UDMA_MASK, + .port_ops = &nv_generic_ops, + .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), + }, + /* SWNCQ */ + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, + .pio_mask = NV_PIO_MASK, + .mwdma_mask = NV_MWDMA_MASK, + .udma_mask = NV_UDMA_MASK, + .port_ops = &nv_swncq_ops, + .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht), }, }; @@ -289,44 +598,288 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, nv_pci_tbl); MODULE_VERSION(DRV_VERSION); -static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, - struct pt_regs *regs) +static bool adma_enabled; +static bool swncq_enabled = 1; +static bool msi_enabled; + +static void nv_adma_register_mode(struct ata_port *ap) { - struct ata_host *host = dev_instance; - unsigned int i; - unsigned int handled = 0; - unsigned long flags; + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 tmp, status; + int count = 0; - spin_lock_irqsave(&host->lock, flags); + if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) + return; - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; + status = readw(mmio + NV_ADMA_STAT); + while (!(status & NV_ADMA_STAT_IDLE) && count < 20) { + ndelay(50); + status = readw(mmio + NV_ADMA_STAT); + count++; + } + if (count == 20) + ata_port_warn(ap, "timeout waiting for 
ADMA IDLE, stat=0x%hx\n", + status); + + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); + + count = 0; + status = readw(mmio + NV_ADMA_STAT); + while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) { + ndelay(50); + status = readw(mmio + NV_ADMA_STAT); + count++; + } + if (count == 20) + ata_port_warn(ap, + "timeout waiting for ADMA LEGACY, stat=0x%hx\n", + status); - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; + pp->flags |= NV_ADMA_PORT_REGISTER_MODE; +} - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) - handled += ata_host_intr(ap, qc); - else - // No request pending? Clear interrupt status - // anyway, in case there's one pending. - ap->ops->check_status(ap); - } +static void nv_adma_mode(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 tmp, status; + int count = 0; + + if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) + return; + + WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); + + status = readw(mmio + NV_ADMA_STAT); + while (((status & NV_ADMA_STAT_LEGACY) || + !(status & NV_ADMA_STAT_IDLE)) && count < 20) { + ndelay(50); + status = readw(mmio + NV_ADMA_STAT); + count++; } + if (count == 20) + ata_port_warn(ap, + "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", + status); - spin_unlock_irqrestore(&host->lock, flags); + pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; +} - return IRQ_RETVAL(handled); +static int nv_adma_slave_config(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct nv_adma_port_priv *pp = ap->private_data; + struct nv_adma_port_priv *port0, *port1; + struct scsi_device *sdev0, *sdev1; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long segment_boundary, flags; + unsigned short sg_tablesize; + int rc; + int adma_enable; + u32 current_reg, new_reg, config_mask; + + rc = ata_scsi_slave_config(sdev); + + if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) + /* Not a proper libata device, ignore */ + return rc; + + spin_lock_irqsave(ap->lock, flags); + + if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { + /* + * NVIDIA reports that ADMA mode does not support ATAPI commands. + * Therefore ATAPI commands are sent through the legacy interface. + * However, the legacy interface only supports 32-bit DMA. + * Restrict DMA parameters as required by the legacy interface + * when an ATAPI device is connected. + */ + segment_boundary = ATA_DMA_BOUNDARY; + /* Subtract 1 since an extra entry may be needed for padding, see + libata-scsi.c */ + sg_tablesize = LIBATA_MAX_PRD - 1; + + /* Since the legacy DMA engine is in use, we need to disable ADMA + on the port. 
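+ The nv_adma_register_mode() call below switches the port back to the legacy interface for this reason.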
*/ + adma_enable = 0; + nv_adma_register_mode(ap); + } else { + segment_boundary = NV_ADMA_DMA_BOUNDARY; + sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; + adma_enable = 1; + } + + pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); + + if (ap->port_no == 1) + config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | + NV_MCP_SATA_CFG_20_PORT1_PWB_EN; + else + config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | + NV_MCP_SATA_CFG_20_PORT0_PWB_EN; + + if (adma_enable) { + new_reg = current_reg | config_mask; + pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; + } else { + new_reg = current_reg & ~config_mask; + pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; + } + + if (current_reg != new_reg) + pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); + + port0 = ap->host->ports[0]->private_data; + port1 = ap->host->ports[1]->private_data; + sdev0 = ap->host->ports[0]->link.device[0].sdev; + sdev1 = ap->host->ports[1]->link.device[0].sdev; + if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || + (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { + /** We have to set the DMA mask to 32-bit if either port is in + ATAPI mode, since they are on the same PCI device which is + used for DMA mapping. If we set the mask we also need to set + the bounce limit on both ports to ensure that the block + layer doesn't feed addresses that cause DMA mapping to + choke. If either SCSI device is not allocated yet, it's OK + since that port will discover its correct setting when it + does get allocated. + Note: Setting 32-bit mask should not fail. */ + if (sdev0) + blk_queue_bounce_limit(sdev0->request_queue, + ATA_DMA_MASK); + if (sdev1) + blk_queue_bounce_limit(sdev1->request_queue, + ATA_DMA_MASK); + + pci_set_dma_mask(pdev, ATA_DMA_MASK); + } else { + /** This shouldn't fail as it was set to this value before */ + pci_set_dma_mask(pdev, pp->adma_dma_mask); + if (sdev0) + blk_queue_bounce_limit(sdev0->request_queue, + pp->adma_dma_mask); + if (sdev1) + blk_queue_bounce_limit(sdev1->request_queue, + pp->adma_dma_mask); + } + + blk_queue_segment_boundary(sdev->request_queue, segment_boundary); + blk_queue_max_segments(sdev->request_queue, sg_tablesize); + ata_port_info(ap, + "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n", + (unsigned long long)*ap->host->dev->dma_mask, + segment_boundary, sg_tablesize); + + spin_unlock_irqrestore(ap->lock, flags); + + return rc; +} + +static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) +{ + struct nv_adma_port_priv *pp = qc->ap->private_data; + return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); +} + +static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + /* Other than when internal or pass-through commands are executed, + the only time this function will be called in ADMA mode will be + if a command fails. In the failure case we don't care about going + into register mode with ADMA commands pending, as the commands will + all shortly be aborted anyway. We assume that NCQ commands are not + issued via passthrough, which is the only way that switching into + ADMA mode could abort outstanding commands. 
*/ + nv_adma_register_mode(ap); + + ata_sff_tf_read(ap, tf); +} + +static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb) +{ + unsigned int idx = 0; + + if (tf->flags & ATA_TFLAG_ISADDR) { + if (tf->flags & ATA_TFLAG_LBA48) { + cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); + cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah); + cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature); + } else + cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB); + + cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam); + cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) + cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); + + cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); + + while (idx < 12) + cpb[idx++] = cpu_to_le16(IGN); + + return idx; +} + +static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) +{ + struct nv_adma_port_priv *pp = ap->private_data; + u8 flags = pp->cpb[cpb_num].resp_flags; + + VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags); + + if (unlikely((force_err || + flags & (NV_CPB_RESP_ATA_ERR | + NV_CPB_RESP_CMD_ERR | + NV_CPB_RESP_CPB_ERR)))) { + struct ata_eh_info *ehi = &ap->link.eh_info; + int freeze = 0; + + ata_ehi_clear_desc(ehi); + __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags); + if (flags & NV_CPB_RESP_ATA_ERR) { + ata_ehi_push_desc(ehi, "ATA error"); + ehi->err_mask |= AC_ERR_DEV; + } else if (flags & NV_CPB_RESP_CMD_ERR) { + ata_ehi_push_desc(ehi, "CMD error"); + ehi->err_mask |= AC_ERR_DEV; + } else if (flags & NV_CPB_RESP_CPB_ERR) { + ata_ehi_push_desc(ehi, "CPB error"); + ehi->err_mask |= AC_ERR_SYSTEM; + freeze = 1; + } else { + /* notifier error, but no error in CPB flags? */ + ata_ehi_push_desc(ehi, "unknown"); + ehi->err_mask |= AC_ERR_OTHER; + freeze = 1; + } + /* Kill all commands. EH will determine what actually failed. */ + if (freeze) + ata_port_freeze(ap); + else + ata_port_abort(ap); + return -1; + } + + if (likely(flags & NV_CPB_RESP_DONE)) + return 1; + return 0; } static int nv_host_intr(struct ata_port *ap, u8 irq_stat) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); - int handled; + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); /* freeze if hotplugged */ if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) { @@ -340,109 +893,733 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) /* DEV interrupt w/ no active qc? 
*/ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { - ata_check_status(ap); + ata_sff_check_status(ap); return 1; } /* handle interrupt */ - handled = ata_host_intr(ap, qc); - if (unlikely(!handled)) { - /* spurious, clear it */ - ata_check_status(ap); + return ata_bmdma_port_intr(ap, qc); +} + +static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + int i, handled = 0; + u32 notifier_clears[2]; + + spin_lock(&host->lock); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 status; + u32 gen_ctl; + u32 notifier, notifier_error; + + notifier_clears[i] = 0; + + /* if ADMA is disabled, use standard ata interrupt handler */ + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + handled += nv_host_intr(ap, irq_stat); + continue; + } + + /* if in ATA register mode, check for standard interrupts */ + if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + if (ata_tag_valid(ap->link.active_tag)) + /** NV_INT_DEV indication seems unreliable + at times at least in ADMA mode. Force it + on always when a command is active, to + prevent losing interrupts. */ + irq_stat |= NV_INT_DEV; + handled += nv_host_intr(ap, irq_stat); + } + + notifier = readl(mmio + NV_ADMA_NOTIFIER); + notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); + notifier_clears[i] = notifier | notifier_error; + + gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); + + if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && + !notifier_error) + /* Nothing to do */ + continue; + + status = readw(mmio + NV_ADMA_STAT); + + /* + * Clear status. Ensure the controller sees the + * clearing before we start looking at any of the CPB + * statuses, so that any CPB completions after this + * point in the handler will raise another interrupt. 
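+ * The posted-write flush and the rmb() below enforce exactly that ordering.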
+ */ + writew(status, mmio + NV_ADMA_STAT); + readw(mmio + NV_ADMA_STAT); /* flush posted write */ + rmb(); + + handled++; /* irq handled if we got here */ + + /* freeze if hotplugged or controller error */ + if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | + NV_ADMA_STAT_HOTUNPLUG | + NV_ADMA_STAT_TIMEOUT | + NV_ADMA_STAT_SERROR))) { + struct ata_eh_info *ehi = &ap->link.eh_info; + + ata_ehi_clear_desc(ehi); + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); + if (status & NV_ADMA_STAT_TIMEOUT) { + ehi->err_mask |= AC_ERR_SYSTEM; + ata_ehi_push_desc(ehi, "timeout"); + } else if (status & NV_ADMA_STAT_HOTPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hotplug"); + } else if (status & NV_ADMA_STAT_HOTUNPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hot unplug"); + } else if (status & NV_ADMA_STAT_SERROR) { + /* let EH analyze SError and figure out cause */ + ata_ehi_push_desc(ehi, "SError"); + } else + ata_ehi_push_desc(ehi, "unknown"); + ata_port_freeze(ap); + continue; + } + + if (status & (NV_ADMA_STAT_DONE | + NV_ADMA_STAT_CPBERR | + NV_ADMA_STAT_CMD_COMPLETE)) { + u32 check_commands = notifier_clears[i]; + u32 done_mask = 0; + int pos, rc; + + if (status & NV_ADMA_STAT_CPBERR) { + /* check all active commands */ + if (ata_tag_valid(ap->link.active_tag)) + check_commands = 1 << + ap->link.active_tag; + else + check_commands = ap->link.sactive; + } + + /* check CPBs for completed commands */ + while ((pos = ffs(check_commands))) { + pos--; + rc = nv_adma_check_cpb(ap, pos, + notifier_error & (1 << pos)); + if (rc > 0) + done_mask |= 1 << pos; + else if (unlikely(rc < 0)) + check_commands = 0; + check_commands &= ~(1 << pos); + } + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + } + } + + if (notifier_clears[0] || notifier_clears[1]) { + /* Note: Both notifier clear registers must be written + if either is set, even if one is zero, according to NVIDIA. 
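+ nv_adma_irq_clear() follows the same rule when it clears a single port.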
*/ + struct nv_adma_port_priv *pp = host->ports[0]->private_data; + writel(notifier_clears[0], pp->notifier_clear_block); + pp = host->ports[1]->private_data; + writel(notifier_clears[1], pp->notifier_clear_block); + } + + spin_unlock(&host->lock); + + return IRQ_RETVAL(handled); +} + +static void nv_adma_freeze(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 tmp; + + nv_ck804_freeze(ap); + + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) + return; + + /* clear any outstanding CK804 notifications */ + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), + ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); + + /* Disable interrupt */ + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), + mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ +} + +static void nv_adma_thaw(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 tmp; + + nv_ck804_thaw(ap); + + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) + return; + + /* Enable interrupt */ + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), + mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ +} + +static void nv_adma_irq_clear(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u32 notifier_clears[2]; + + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { + ata_bmdma_irq_clear(ap); + return; + } + + /* clear any outstanding CK804 notifications */ + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), + ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); + + /* clear ADMA status */ + writew(0xffff, mmio + NV_ADMA_STAT); + + /* clear notifiers - note both ports need to be written with + something even though we are only clearing on one */ + if (ap->port_no == 0) { + notifier_clears[0] = 0xFFFFFFFF; + notifier_clears[1] = 0; + } else { + notifier_clears[0] = 0; + notifier_clears[1] = 0xFFFFFFFF; + } + pp = ap->host->ports[0]->private_data; + writel(notifier_clears[0], pp->notifier_clear_block); + pp = ap->host->ports[1]->private_data; + writel(notifier_clears[1], pp->notifier_clear_block); +} + +static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct nv_adma_port_priv *pp = qc->ap->private_data; + + if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) + ata_bmdma_post_internal_cmd(qc); +} + +static int nv_adma_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host->dev; + struct nv_adma_port_priv *pp; + int rc; + void *mem; + dma_addr_t mem_dma; + void __iomem *mmio; + struct pci_dev *pdev = to_pci_dev(dev); + u16 tmp; + + VPRINTK("ENTER\n"); + + /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and + pad buffers */ + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + /* we might fallback to bmdma, allocate bmdma resources */ + rc = ata_bmdma_port_start(ap); + if (rc) + return rc; + + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + + ap->port_no * NV_ADMA_PORT_SIZE; + pp->ctl_block = mmio; + pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; + pp->notifier_clear_block = pp->gen_block + + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); + + /* Now that the legacy PRD and padding 
buffer are allocated, we can + safely raise the DMA mask to allocate the CPB/APRD table. + These are allowed to fail since we store the value that ends up + being used as the bounce limit in slave_config later if + needed. */ + pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + pp->adma_dma_mask = *dev->dma_mask; + + mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, + &mem_dma, GFP_KERNEL); + if (!mem) + return -ENOMEM; + memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ); + + /* + * First item in chunk of DMA memory: + * 128-byte command parameter block (CPB) + * one for each command tag + */ + pp->cpb = mem; + pp->cpb_dma = mem_dma; + + writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); + writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); + + mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; + mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; + + /* + * Second item: block of ADMA_SGTBL_LEN s/g entries + */ + pp->aprd = mem; + pp->aprd_dma = mem_dma; + + ap->private_data = pp; + + /* clear any outstanding interrupt conditions */ + writew(0xffff, mmio + NV_ADMA_STAT); + + /* initialize port variables */ + pp->flags = NV_ADMA_PORT_REGISTER_MODE; + + /* clear CPB fetch count */ + writew(0, mmio + NV_ADMA_CPB_COUNT); + + /* clear GO for register mode, enable interrupt */ + tmp = readw(mmio + NV_ADMA_CTL); + writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | + NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); + + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + udelay(1); + writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + + return 0; +} + +static void nv_adma_port_stop(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + + VPRINTK("ENTER\n"); + writew(0, mmio + NV_ADMA_CTL); +} + +#ifdef CONFIG_PM +static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + + /* Go to register mode - clears GO */ + nv_adma_register_mode(ap); + + /* clear CPB fetch count */ + writew(0, mmio + NV_ADMA_CPB_COUNT); + + /* disable interrupt, shut down port */ + writew(0, mmio + NV_ADMA_CTL); + + return 0; +} + +static int nv_adma_port_resume(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 tmp; + + /* set CPB block location */ + writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); + writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); + + /* clear any outstanding interrupt conditions */ + writew(0xffff, mmio + NV_ADMA_STAT); + + /* initialize port variables */ + pp->flags |= NV_ADMA_PORT_REGISTER_MODE; + + /* clear CPB fetch count */ + writew(0, mmio + NV_ADMA_CPB_COUNT); + + /* clear GO for register mode, enable interrupt */ + tmp = readw(mmio + NV_ADMA_CTL); + writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | + NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); + + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + udelay(1); + writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + + return 0; +} +#endif + +static void nv_adma_setup_port(struct ata_port *ap) +{ + void __iomem 
*mmio = ap->host->iomap[NV_MMIO_BAR]; + struct ata_ioports *ioport = &ap->ioaddr; + + VPRINTK("ENTER\n"); + + mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; + + ioport->cmd_addr = mmio; + ioport->data_addr = mmio + (ATA_REG_DATA * 4); + ioport->error_addr = + ioport->feature_addr = mmio + (ATA_REG_ERR * 4); + ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); + ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); + ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); + ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); + ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); + ioport->status_addr = + ioport->command_addr = mmio + (ATA_REG_STATUS * 4); + ioport->altstatus_addr = + ioport->ctl_addr = mmio + 0x20; +} + +static int nv_adma_host_init(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + unsigned int i; + u32 tmp32; + + VPRINTK("ENTER\n"); + + /* enable ADMA on the ports */ + pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); + tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN | + NV_MCP_SATA_CFG_20_PORT0_PWB_EN | + NV_MCP_SATA_CFG_20_PORT1_EN | + NV_MCP_SATA_CFG_20_PORT1_PWB_EN; + + pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); + + for (i = 0; i < host->n_ports; i++) + nv_adma_setup_port(host->ports[i]); + + return 0; +} + +static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, + struct scatterlist *sg, + int idx, + struct nv_adma_prd *aprd) +{ + u8 flags = 0; + if (qc->tf.flags & ATA_TFLAG_WRITE) + flags |= NV_APRD_WRITE; + if (idx == qc->n_elem - 1) + flags |= NV_APRD_END; + else if (idx != 4) + flags |= NV_APRD_CONT; + + aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); + aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ + aprd->flags = flags; + aprd->packet_len = 0; +} + +static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) +{ + struct nv_adma_port_priv *pp = qc->ap->private_data; + struct nv_adma_prd *aprd; + struct scatterlist *sg; + unsigned int si; + + VPRINTK("ENTER\n"); + + for_each_sg(qc->sg, sg, qc->n_elem, si) { + aprd = (si < 5) ? &cpb->aprd[si] : + &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)]; + nv_adma_fill_aprd(qc, sg, si, aprd); } + if (si > 5) + cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); + else + cpb->next_aprd = cpu_to_le64(0); +} + +static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) +{ + struct nv_adma_port_priv *pp = qc->ap->private_data; + + /* ADMA engine can only be used for non-ATAPI DMA commands, + or interrupt-driven no-data commands. 
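+ Everything else falls back to the legacy register interface.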
*/ + if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || + (qc->tf.flags & ATA_TFLAG_POLLING)) + return 1; + + if ((qc->flags & ATA_QCFLAG_DMAMAP) || + (qc->tf.protocol == ATA_PROT_NODATA)) + return 0; return 1; } -static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) +static void nv_adma_qc_prep(struct ata_queued_cmd *qc) { - int i, handled = 0; + struct nv_adma_port_priv *pp = qc->ap->private_data; + struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; + u8 ctl_flags = NV_CPB_CTL_CPB_VALID | + NV_CPB_CTL_IEN; + + if (nv_adma_use_reg_mode(qc)) { + BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && + (qc->flags & ATA_QCFLAG_DMAMAP)); + nv_adma_register_mode(qc->ap); + ata_bmdma_qc_prep(qc); + return; + } + + cpb->resp_flags = NV_CPB_RESP_DONE; + wmb(); + cpb->ctl_flags = 0; + wmb(); + + cpb->len = 3; + cpb->tag = qc->tag; + cpb->next_cpb_idx = 0; + + /* turn on NCQ flags for NCQ commands */ + if (qc->tf.protocol == ATA_PROT_NCQ) + ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA; + + VPRINTK("qc->flags = 0x%lx\n", qc->flags); + + nv_adma_tf_to_cpb(&qc->tf, cpb->tf); + + if (qc->flags & ATA_QCFLAG_DMAMAP) { + nv_adma_fill_sg(qc, cpb); + ctl_flags |= NV_CPB_CTL_APRD_VALID; + } else + memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); + + /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID + until we are finished filling in all of the contents */ + wmb(); + cpb->ctl_flags = ctl_flags; + wmb(); + cpb->resp_flags = 0; +} + +static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) +{ + struct nv_adma_port_priv *pp = qc->ap->private_data; + void __iomem *mmio = pp->ctl_block; + int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); + + VPRINTK("ENTER\n"); + + /* We can't handle result taskfile with NCQ commands, since + retrieving the taskfile switches us out of ADMA mode and would abort + existing commands. */ + if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && + (qc->flags & ATA_QCFLAG_RESULT_TF))) { + ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); + return AC_ERR_SYSTEM; + } + + if (nv_adma_use_reg_mode(qc)) { + /* use ATA register mode */ + VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); + BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && + (qc->flags & ATA_QCFLAG_DMAMAP)); + nv_adma_register_mode(qc->ap); + return ata_bmdma_qc_issue(qc); + } else + nv_adma_mode(qc->ap); + + /* write append register, command tag in lower 8 bits + and (number of cpbs to append -1) in top 8 bits */ + wmb(); + + if (curr_ncq != pp->last_issue_ncq) { + /* Seems to need some delay before switching between NCQ and + non-NCQ commands, else we get command timeouts and such. */ + udelay(20); + pp->last_issue_ncq = curr_ncq; + } + + writew(qc->tag, mmio + NV_ADMA_APPEND); + + DPRINTK("Issued tag %u\n", qc->tag); + + return 0; +} + +static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + handled += ata_bmdma_port_intr(ap, qc); + } else { + /* + * No request pending? Clear interrupt status + * anyway, in case there's one pending. 
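+ * Reading the status register also acknowledges any pending device interrupt.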
+ */ + ap->ops->sff_check_status(ap); + } + } - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) - handled += nv_host_intr(ap, irq_stat); + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_RETVAL(handled); +} + +static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) +{ + int i, handled = 0; + for (i = 0; i < host->n_ports; i++) { + handled += nv_host_intr(host->ports[i], irq_stat); irq_stat >>= NV_INT_PORT_SHIFT; } return IRQ_RETVAL(handled); } -static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, - struct pt_regs *regs) +static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; u8 irq_stat; irqreturn_t ret; spin_lock(&host->lock); - irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); + irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); ret = nv_do_interrupt(host, irq_stat); spin_unlock(&host->lock); return ret; } -static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, - struct pt_regs *regs) +static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; u8 irq_stat; irqreturn_t ret; spin_lock(&host->lock); - irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804); + irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); ret = nv_do_interrupt(host, irq_stat); spin_unlock(&host->lock); return ret; } -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; - return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); + *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; + + iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; +} + +static int nv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_eh_context *ehc = &link->eh_context; - iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); + /* Do hardreset iff it's post-boot probing, please read the + * comment above port ops for details. 
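+ * The unconditional -EAGAIN below tells EH that no signature was + * acquired, so it follows up with softreset for classification.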
+ */ + if (!(link->ap->pflags & ATA_PFLAG_LOADING) && + !ata_dev_enabled(link->device)) + sata_link_hardreset(link, sata_deb_timing_hotplug, deadline, + NULL, NULL); + else { + const unsigned long *timing = sata_ehc_deb_timing(ehc); + int rc; + + if (!(ehc->i.flags & ATA_EHI_QUIET)) + ata_link_info(link, + "nv: skipping hardreset on occupied port\n"); + + /* make sure the link is online */ + rc = sata_link_resume(link, timing, deadline); + /* whine about phy resume failure but proceed */ + if (rc && rc != -EOPNOTSUPP) + ata_link_warn(link, "failed to resume link (errno=%d)\n", + rc); + } + + /* device signature acquisition is unreliable */ + return -EAGAIN; } static void nv_nf2_freeze(struct ata_port *ap) { - unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; + void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; int shift = ap->port_no * NV_INT_PORT_SHIFT; u8 mask; - mask = inb(scr_addr + NV_INT_ENABLE); + mask = ioread8(scr_addr + NV_INT_ENABLE); mask &= ~(NV_INT_ALL << shift); - outb(mask, scr_addr + NV_INT_ENABLE); + iowrite8(mask, scr_addr + NV_INT_ENABLE); } static void nv_nf2_thaw(struct ata_port *ap) { - unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; + void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; int shift = ap->port_no * NV_INT_PORT_SHIFT; u8 mask; - outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS); + iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS); - mask = inb(scr_addr + NV_INT_ENABLE); + mask = ioread8(scr_addr + NV_INT_ENABLE); mask |= (NV_INT_MASK << shift); - outb(mask, scr_addr + NV_INT_ENABLE); + iowrite8(mask, scr_addr + NV_INT_ENABLE); } static void nv_ck804_freeze(struct ata_port *ap) { - void __iomem *mmio_base = ap->host->mmio_base; + void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; int shift = ap->port_no * NV_INT_PORT_SHIFT; u8 mask; @@ -453,7 +1630,7 @@ static void nv_ck804_freeze(struct ata_port *ap) static void nv_ck804_thaw(struct ata_port *ap) { - void __iomem *mmio_base = ap->host->mmio_base; + void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; int shift = ap->port_no * NV_INT_PORT_SHIFT; u8 mask; @@ -464,80 +1641,772 @@ static void nv_ck804_thaw(struct ata_port *ap) writeb(mask, mmio_base + NV_INT_ENABLE_CK804); } -static int nv_hardreset(struct ata_port *ap, unsigned int *class) +static void nv_mcp55_freeze(struct ata_port *ap) { - unsigned int dummy; + void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; + int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; + u32 mask; - /* SATA hardreset fails to retrieve proper device signature on - * some controllers. Don't classify on hardreset. 
For more - * info, see http://bugme.osdl.org/show_bug.cgi?id=3352 - */ - return sata_std_hardreset(ap, &dummy); + writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55); + + mask = readl(mmio_base + NV_INT_ENABLE_MCP55); + mask &= ~(NV_INT_ALL_MCP55 << shift); + writel(mask, mmio_base + NV_INT_ENABLE_MCP55); +} + +static void nv_mcp55_thaw(struct ata_port *ap) +{ + void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; + int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; + u32 mask; + + writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55); + + mask = readl(mmio_base + NV_INT_ENABLE_MCP55); + mask |= (NV_INT_MASK_MCP55 << shift); + writel(mask, mmio_base + NV_INT_ENABLE_MCP55); +} + +static void nv_adma_error_handler(struct ata_port *ap) +{ + struct nv_adma_port_priv *pp = ap->private_data; + if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { + void __iomem *mmio = pp->ctl_block; + int i; + u16 tmp; + + if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { + u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); + u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); + u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); + u32 status = readw(mmio + NV_ADMA_STAT); + u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT); + u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX); + + ata_port_err(ap, + "EH in ADMA mode, notifier 0x%X " + "notifier_error 0x%X gen_ctl 0x%X status 0x%X " + "next cpb count 0x%X next cpb idx 0x%x\n", + notifier, notifier_error, gen_ctl, status, + cpb_count, next_cpb_idx); + + for (i = 0; i < NV_ADMA_MAX_CPBS; i++) { + struct nv_adma_cpb *cpb = &pp->cpb[i]; + if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || + ap->link.sactive & (1 << i)) + ata_port_err(ap, + "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", + i, cpb->ctl_flags, cpb->resp_flags); + } + } + + /* Push us back into port register mode for error handling. 
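+ The CPB invalidation and channel reset below are then performed from register mode.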
*/ + nv_adma_register_mode(ap); + + /* Mark all of the CPBs as invalid to prevent them from + being executed */ + for (i = 0; i < NV_ADMA_MAX_CPBS; i++) + pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; + + /* clear CPB fetch count */ + writew(0, mmio + NV_ADMA_CPB_COUNT); + + /* Reset channel */ + tmp = readw(mmio + NV_ADMA_CTL); + writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + udelay(1); + writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); + readw(mmio + NV_ADMA_CTL); /* flush posted write */ + } + + ata_bmdma_error_handler(ap); +} + +static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + struct defer_queue *dq = &pp->defer_queue; + + /* queue is full */ + WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); + dq->defer_bits |= (1 << qc->tag); + dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag; +} + +static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + struct defer_queue *dq = &pp->defer_queue; + unsigned int tag; + + if (dq->head == dq->tail) /* null queue */ + return NULL; + + tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)]; + dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON; + WARN_ON(!(dq->defer_bits & (1 << tag))); + dq->defer_bits &= ~(1 << tag); + + return ata_qc_from_tag(ap, tag); +} + +static void nv_swncq_fis_reinit(struct ata_port *ap) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + + pp->dhfis_bits = 0; + pp->dmafis_bits = 0; + pp->sdbfis_bits = 0; + pp->ncq_flags = 0; +} + +static void nv_swncq_pp_reinit(struct ata_port *ap) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + struct defer_queue *dq = &pp->defer_queue; + + dq->head = 0; + dq->tail = 0; + dq->defer_bits = 0; + pp->qc_active = 0; + pp->last_issue_tag = ATA_TAG_POISON; + nv_swncq_fis_reinit(ap); +} + +static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + + writew(fis, pp->irq_block); +} + +static void __ata_bmdma_stop(struct ata_port *ap) +{ + struct ata_queued_cmd qc; + + qc.ap = ap; + ata_bmdma_stop(&qc); +} + +static void nv_swncq_ncq_stop(struct ata_port *ap) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + unsigned int i; + u32 sactive; + u32 done_mask; + + ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", + ap->qc_active, ap->link.sactive); + ata_port_err(ap, + "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n " + "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n", + pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, + pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); + + ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", + ap->ops->sff_check_status(ap), + ioread8(ap->ioaddr.error_addr)); + + sactive = readl(pp->sactive_block); + done_mask = pp->qc_active ^ sactive; + + ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); + for (i = 0; i < ATA_MAX_QUEUE; i++) { + u8 err = 0; + if (pp->qc_active & (1 << i)) + err = 0; + else if (done_mask & (1 << i)) + err = 1; + else + continue; + + ata_port_err(ap, + "tag 0x%x: %01x %01x %01x %01x %s\n", i, + (pp->dhfis_bits >> i) & 0x1, + (pp->dmafis_bits >> i) & 0x1, + (pp->sdbfis_bits >> i) & 0x1, + (sactive >> i) & 0x1, + (err ? "error! 
tag doesn't exist" : " ")); + } + + nv_swncq_pp_reinit(ap); + ap->ops->sff_irq_clear(ap); + __ata_bmdma_stop(ap); + nv_swncq_irq_clear(ap, 0xffff); +} + +static void nv_swncq_error_handler(struct ata_port *ap) +{ + struct ata_eh_context *ehc = &ap->link.eh_context; + + if (ap->link.sactive) { + nv_swncq_ncq_stop(ap); + ehc->i.action |= ATA_EH_RESET; + } + + ata_bmdma_error_handler(ap); +} + +#ifdef CONFIG_PM +static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg) +{ + void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; + u32 tmp; + + /* clear irq */ + writel(~0, mmio + NV_INT_STATUS_MCP55); + + /* disable irq */ + writel(0, mmio + NV_INT_ENABLE_MCP55); + + /* disable swncq */ + tmp = readl(mmio + NV_CTL_MCP55); + tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ); + writel(tmp, mmio + NV_CTL_MCP55); + + return 0; +} + +static int nv_swncq_port_resume(struct ata_port *ap) +{ + void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; + u32 tmp; + + /* clear irq */ + writel(~0, mmio + NV_INT_STATUS_MCP55); + + /* enable irq */ + writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55); + + /* enable swncq */ + tmp = readl(mmio + NV_CTL_MCP55); + writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55); + + return 0; +} +#endif + +static void nv_swncq_host_init(struct ata_host *host) +{ + u32 tmp; + void __iomem *mmio = host->iomap[NV_MMIO_BAR]; + struct pci_dev *pdev = to_pci_dev(host->dev); + u8 regval; + + /* disable ECO 398 */ + pci_read_config_byte(pdev, 0x7f, &regval); + regval &= ~(1 << 7); + pci_write_config_byte(pdev, 0x7f, regval); + + /* enable swncq */ + tmp = readl(mmio + NV_CTL_MCP55); + VPRINTK("HOST_CTL:0x%X\n", tmp); + writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55); + + /* enable irq intr */ + tmp = readl(mmio + NV_INT_ENABLE_MCP55); + VPRINTK("HOST_ENABLE:0x%X\n", tmp); + writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55); + + /* clear port irq */ + writel(~0x0, mmio + NV_INT_STATUS_MCP55); +} + +static int nv_swncq_slave_config(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct ata_device *dev; + int rc; + u8 rev; + u8 check_maxtor = 0; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + + rc = ata_scsi_slave_config(sdev); + if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) + /* Not a proper libata device, ignore */ + return rc; + + dev = &ap->link.device[sdev->id]; + if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) + return rc; + + /* if MCP51 and Maxtor, then disable ncq */ + if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA || + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2) + check_maxtor = 1; + + /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */ + if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA || + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) { + pci_read_config_byte(pdev, 0x8, &rev); + if (rev <= 0xa2) + check_maxtor = 1; + } + + if (!check_maxtor) + return rc; + + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + + if (strncmp(model_num, "Maxtor", 6) == 0) { + ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); + ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n", + sdev->queue_depth); + } + + return rc; +} + +static int nv_swncq_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host->dev; + void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; + struct nv_swncq_port_priv *pp; + int rc; + + /* we might fallback to bmdma, 
allocate bmdma resources */ + rc = ata_bmdma_port_start(ap); + if (rc) + return rc; + + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, + &pp->prd_dma, GFP_KERNEL); + if (!pp->prd) + return -ENOMEM; + memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE); + + ap->private_data = pp; + pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; + pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; + pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; + + return 0; +} + +static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) +{ + if (qc->tf.protocol != ATA_PROT_NCQ) { + ata_bmdma_qc_prep(qc); + return; + } + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + nv_swncq_fill_sg(qc); +} + +static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scatterlist *sg; + struct nv_swncq_port_priv *pp = ap->private_data; + struct ata_bmdma_prd *prd; + unsigned int si, idx; + + prd = pp->prd + ATA_MAX_PRD * qc->tag; + + idx = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len; + + addr = (u32)sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + prd[idx].addr = cpu_to_le32(addr); + prd[idx].flags_len = cpu_to_le32(len & 0xffff); + + idx++; + sg_len -= len; + addr += len; + } + } + + prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + + if (qc == NULL) + return 0; + + DPRINTK("Enter\n"); + + writel((1 << qc->tag), pp->sactive_block); + pp->last_issue_tag = qc->tag; + pp->dhfis_bits &= ~(1 << qc->tag); + pp->dmafis_bits &= ~(1 << qc->tag); + pp->qc_active |= (0x1 << qc->tag); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->sff_exec_command(ap, &qc->tf); + + DPRINTK("Issued tag %u\n", qc->tag); + + return 0; +} + +static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct nv_swncq_port_priv *pp = ap->private_data; + + if (qc->tf.protocol != ATA_PROT_NCQ) + return ata_bmdma_qc_issue(qc); + + DPRINTK("Enter\n"); + + if (!pp->qc_active) + nv_swncq_issue_atacmd(ap, qc); + else + nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ + + return 0; +} + +static void nv_swncq_hotplug(struct ata_port *ap, u32 fis) +{ + u32 serror; + struct ata_eh_info *ehi = &ap->link.eh_info; + + ata_ehi_clear_desc(ehi); + + /* AHCI needs SError cleared; otherwise, it might lock up */ + sata_scr_read(&ap->link, SCR_ERROR, &serror); + sata_scr_write(&ap->link, SCR_ERROR, serror); + + /* analyze @fis */ + if (fis & NV_SWNCQ_IRQ_ADDED) + ata_ehi_push_desc(ehi, "hot plug"); + else if (fis & NV_SWNCQ_IRQ_REMOVED) + ata_ehi_push_desc(ehi, "hot unplug"); + + ata_ehi_hotplugged(ehi); + + /* okay, let's hand over to EH */ + ehi->serror |= serror; + + ata_port_freeze(ap); +} + +static int nv_swncq_sdbfis(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + struct nv_swncq_port_priv *pp = ap->private_data; + struct ata_eh_info *ehi = &ap->link.eh_info; + u32 sactive; + u32 done_mask; + u8 host_stat; + u8 lack_dhfis = 0; + + host_stat = ap->ops->bmdma_status(ap); + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transferring data to/from memory */ + ata_ehi_clear_desc(ehi); + 
ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + ehi->err_mask |= AC_ERR_HOST_BUS; + ehi->action |= ATA_EH_RESET; + return -EINVAL; + } + + ap->ops->sff_irq_clear(ap); + __ata_bmdma_stop(ap); + + sactive = readl(pp->sactive_block); + done_mask = pp->qc_active ^ sactive; + + pp->qc_active &= ~done_mask; + pp->dhfis_bits &= ~done_mask; + pp->dmafis_bits &= ~done_mask; + pp->sdbfis_bits |= done_mask; + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + + if (!ap->qc_active) { + DPRINTK("over\n"); + nv_swncq_pp_reinit(ap); + return 0; + } + + if (pp->qc_active & pp->dhfis_bits) + return 0; + + if ((pp->ncq_flags & ncq_saw_backout) || + (pp->qc_active ^ pp->dhfis_bits)) + /* if the controller can't get a device to host register FIS, + * the driver needs to reissue the command. + */ + lack_dhfis = 1; + + DPRINTK("id 0x%x QC: qc_active 0x%x, " + "SWNCQ:qc_active 0x%X defer_bits %X " + "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n", + ap->print_id, ap->qc_active, pp->qc_active, + pp->defer_queue.defer_bits, pp->dhfis_bits, + pp->dmafis_bits, pp->last_issue_tag); + + nv_swncq_fis_reinit(ap); + + if (lack_dhfis) { + qc = ata_qc_from_tag(ap, pp->last_issue_tag); + nv_swncq_issue_atacmd(ap, qc); + return 0; + } + + if (pp->defer_queue.defer_bits) { + /* send the next deferred command */ + qc = nv_swncq_qc_from_dq(ap); + WARN_ON(qc == NULL); + nv_swncq_issue_atacmd(ap, qc); + } + + return 0; +} + +static inline u32 nv_swncq_tag(struct ata_port *ap) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + u32 tag; + + tag = readb(pp->tag_block) >> 2; + return (tag & 0x1f); +} + +static void nv_swncq_dmafis(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned int rw; + u8 dmactl; + u32 tag; + struct nv_swncq_port_priv *pp = ap->private_data; + + __ata_bmdma_stop(ap); + tag = nv_swncq_tag(ap); + + DPRINTK("dma setup tag 0x%x\n", tag); + qc = ata_qc_from_tag(ap, tag); + + if (unlikely(!qc)) + return; + + rw = qc->tf.flags & ATA_TFLAG_WRITE; + + /* load PRD table addr. */ + iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag, + ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + + /* specify data direction, triple-check start bit is clear */ + dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + dmactl &= ~ATA_DMA_WR; + if (!rw) + dmactl |= ATA_DMA_WR; + + iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + +static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) +{ + struct nv_swncq_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; + struct ata_eh_info *ehi = &ap->link.eh_info; + u32 serror; + u8 ata_stat; + + ata_stat = ap->ops->sff_check_status(ap); + nv_swncq_irq_clear(ap, fis); + if (!fis) + return; + + if (ap->pflags & ATA_PFLAG_FROZEN) + return; + + if (fis & NV_SWNCQ_IRQ_HOTPLUG) { + nv_swncq_hotplug(ap, fis); + return; + } + + if (!pp->qc_active) + return; + + if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) + return; + ap->ops->scr_write(&ap->link, SCR_ERROR, serror); + + if (ata_stat & ATA_ERR) { + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis); + ehi->err_mask |= AC_ERR_DEV; + ehi->serror |= serror; + ehi->action |= ATA_EH_RESET; + ata_port_freeze(ap); + return; + } + + if (fis & NV_SWNCQ_IRQ_BACKOUT) { + /* If the IRQ signals a backout, the driver must + * reissue the command some time later. 
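+ * nv_swncq_sdbfis() checks this flag and reissues the last + * issued command when a D2H FIS is missing.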
+ */ + pp->ncq_flags |= ncq_saw_backout; + } + + if (fis & NV_SWNCQ_IRQ_SDBFIS) { + pp->ncq_flags |= ncq_saw_sdb; + DPRINTK("id 0x%x SWNCQ: qc_active 0x%X " + "dhfis 0x%X dmafis 0x%X sactive 0x%X\n", + ap->print_id, pp->qc_active, pp->dhfis_bits, + pp->dmafis_bits, readl(pp->sactive_block)); + if (nv_swncq_sdbfis(ap) < 0) + goto irq_error; + } + + if (fis & NV_SWNCQ_IRQ_DHREGFIS) { + /* The interrupt indicates the new command + * was transmitted correctly to the drive. + */ + pp->dhfis_bits |= (0x1 << pp->last_issue_tag); + pp->ncq_flags |= ncq_saw_d2h; + if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { + ata_ehi_push_desc(ehi, "illegal FIS transaction"); + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + goto irq_error; + } + + if (!(fis & NV_SWNCQ_IRQ_DMASETUP) && + !(pp->ncq_flags & ncq_saw_dmas)) { + ata_stat = ap->ops->sff_check_status(ap); + if (ata_stat & ATA_BUSY) + goto irq_exit; + + if (pp->defer_queue.defer_bits) { + DPRINTK("send next command\n"); + qc = nv_swncq_qc_from_dq(ap); + nv_swncq_issue_atacmd(ap, qc); + } + } + } + + if (fis & NV_SWNCQ_IRQ_DMASETUP) { + /* program the DMA controller with the appropriate PRD buffers + * and start the DMA transfer for the requested command. + */ + pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); + pp->ncq_flags |= ncq_saw_dmas; + nv_swncq_dmafis(ap); + } + +irq_exit: + return; +irq_error: + ata_ehi_push_desc(ehi, "fis:0x%x", fis); + ata_port_freeze(ap); + return; } -static void nv_error_handler(struct ata_port *ap) +static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance) { - ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, - nv_hardreset, ata_std_postreset); + struct ata_host *host = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + u32 irq_stat; + + spin_lock_irqsave(&host->lock, flags); + + irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + if (ap->link.sactive) { + nv_swncq_host_interrupt(ap, (u16)irq_stat); + handled = 1; + } else { + if (irq_stat) /* clear all except the hotplug bits */ + nv_swncq_irq_clear(ap, 0xfff0); + + handled += nv_host_intr(ap, (u8)irq_stat); + } + irq_stat >>= NV_INT_PORT_SHIFT_MCP55; + } + + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_RETVAL(handled); } -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version = 0; - struct ata_port_info *ppi[2]; - struct ata_probe_ent *probe_ent; - int pci_dev_busy = 0; + const struct ata_port_info *ppi[] = { NULL, NULL }; + struct nv_pi_priv *ipriv; + struct ata_host *host; + struct nv_host_priv *hpriv; int rc; u32 bar; - unsigned long base; + void __iomem *base; + unsigned long type = ent->driver_data; // Make sure this is a SATA controller by counting the number of bars // (NVIDIA SATA controllers will always have six bars). Otherwise, // it's an IDE controller and we ignore it.
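nv_swncq_sdbfis() above recovers the set of completed tags without polling each one: any tag the driver still counts as active (pp->qc_active) that has dropped out of the controller's SActive register must have finished, so done_mask = qc_active ^ sactive. The XOR works only because the hardware never sets SActive bits for tags the driver did not issue. A self-contained sketch of that bookkeeping, with struct swncq_state as a simplified stand-in for nv_swncq_port_priv:

#include <stdint.h>
#include <assert.h>

/* software view of outstanding NCQ tags, as in nv_swncq_port_priv */
struct swncq_state {
	uint32_t qc_active;  /* tags the driver has issued */
	uint32_t dhfis_bits; /* tags that received a D2H register FIS */
};

static void issue_tag(struct swncq_state *st, unsigned int tag)
{
	st->qc_active |= 1u << tag;
	st->dhfis_bits &= ~(1u << tag);
}

/* sactive is what the controller still reports as in flight */
static uint32_t complete_done(struct swncq_state *st, uint32_t sactive)
{
	/* bits set in qc_active but clear in sactive have completed */
	uint32_t done_mask = st->qc_active ^ sactive;

	st->qc_active &= ~done_mask;
	st->dhfis_bits &= ~done_mask;
	return done_mask;
}

int main(void)
{
	struct swncq_state st = { 0, 0 };

	issue_tag(&st, 0);
	issue_tag(&st, 3);
	/* hardware reports tag 3 still active, so tag 0 completed */
	assert(complete_done(&st, 1u << 3) == 1u << 0);
	assert(st.qc_active == 1u << 3);
	return 0;
}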
- for (bar=0; bar<6; bar++) + for (bar = 0; bar < 6; bar++) if (pci_resource_start(pdev, bar) == 0) return -ENODEV; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); + rc = pcim_enable_device(pdev); if (rc) - goto err_out; - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out_disable; + return rc; + + /* determine type and allocate host */ + if (type == CK804 && adma_enabled) { + dev_notice(&pdev->dev, "Using ADMA mode\n"); + type = ADMA; + } else if (type == MCP5x && swncq_enabled) { + dev_notice(&pdev->dev, "Using SWNCQ mode\n"); + type = SWNCQ; } - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + ppi[0] = &nv_port_info[type]; + ipriv = ppi[0]->private_data; + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) - goto err_out_regions; - - rc = -ENOMEM; - - ppi[0] = ppi[1] = &nv_port_info[ent->driver_data]; - probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); - if (!probe_ent) - goto err_out_regions; + return rc; - probe_ent->mmio_base = pci_iomap(pdev, 5, 0); - if (!probe_ent->mmio_base) { - rc = -EIO; - goto err_out_free_ent; - } + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + hpriv->type = type; + host->private_data = hpriv; - base = (unsigned long)probe_ent->mmio_base; + /* request and iomap NV_MMIO_BAR */ + rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME); + if (rc) + return rc; - probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; - probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; + /* configure SCR access */ + base = host->iomap[NV_MMIO_BAR]; + host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET; + host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET; /* enable SATA space for CK804 */ - if (ent->driver_data == CK804) { + if (type >= CK804) { u8 regval; pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val); @@ -545,28 +2414,72 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); } + /* init ADMA */ + if (type == ADMA) { + rc = nv_adma_host_init(host); + if (rc) + return rc; + } else if (type == SWNCQ) + nv_swncq_host_init(host); + + if (msi_enabled) { + dev_notice(&pdev->dev, "Using MSI\n"); + pci_enable_msi(pdev); + } + pci_set_master(pdev); + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); +} - rc = ata_device_add(probe_ent); - if (rc != NV_PORTS) - goto err_out_iounmap; +#ifdef CONFIG_PM_SLEEP +static int nv_pci_device_resume(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + struct nv_host_priv *hpriv = host->private_data; + int rc; - kfree(probe_ent); + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; - return 0; + if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { + if (hpriv->type >= CK804) { + u8 regval; -err_out_iounmap: - pci_iounmap(pdev, probe_ent->mmio_base); -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out_disable: - if (!pci_dev_busy) - pci_disable_device(pdev); -err_out: - return rc; + pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val); + regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN; + pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); + } + if (hpriv->type == ADMA) { + 
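The probe and resume paths above both toggle feature bits in NV_MCP_SATA_CFG_20 with the same PCI config-space read-modify-write. A hedged sketch of that idiom as a generic helper follows; pci_cfg_rmw_byte() is an invented name, while pci_read_config_byte()/pci_write_config_byte() are the real kernel APIs.

#include <linux/pci.h>

/* illustrative helper, not part of the driver */
static void pci_cfg_rmw_byte(struct pci_dev *pdev, int where,
			     u8 set, u8 clear)
{
	u8 regval;

	pci_read_config_byte(pdev, where, &regval);
	regval |= set;
	regval &= ~clear;
	pci_write_config_byte(pdev, where, regval);
}

/* e.g. enabling the extended SATA register space on CK804 would be:
 * pci_cfg_rmw_byte(pdev, NV_MCP_SATA_CFG_20,
 *		    NV_MCP_SATA_CFG_20_SATA_SPACE_EN, 0);
 */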
u32 tmp32; + struct nv_adma_port_priv *pp; + /* enable/disable ADMA on the ports appropriately */ + pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); + + pp = host->ports[0]->private_data; + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) + tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | + NV_MCP_SATA_CFG_20_PORT0_PWB_EN); + else + tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN | + NV_MCP_SATA_CFG_20_PORT0_PWB_EN); + pp = host->ports[1]->private_data; + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) + tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN | + NV_MCP_SATA_CFG_20_PORT1_PWB_EN); + else + tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN | + NV_MCP_SATA_CFG_20_PORT1_PWB_EN); + + pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); + } + } + + ata_host_resume(host); + + return 0; } +#endif static void nv_ck804_host_stop(struct ata_host *host) { @@ -577,19 +2490,30 @@ static void nv_ck804_host_stop(struct ata_host *host) pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val); regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); - - ata_pci_host_stop(host); } -static int __init nv_init(void) +static void nv_adma_host_stop(struct ata_host *host) { - return pci_register_driver(&nv_pci_driver); -} + struct pci_dev *pdev = to_pci_dev(host->dev); + u32 tmp32; -static void __exit nv_exit(void) -{ - pci_unregister_driver(&nv_pci_driver); + /* disable ADMA on the ports */ + pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); + tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | + NV_MCP_SATA_CFG_20_PORT0_PWB_EN | + NV_MCP_SATA_CFG_20_PORT1_EN | + NV_MCP_SATA_CFG_20_PORT1_PWB_EN); + + pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); + + nv_ck804_host_stop(host); } -module_init(nv_init); -module_exit(nv_exit); +module_pci_driver(nv_pci_driver); + +module_param_named(adma, adma_enabled, bool, 0444); +MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); +module_param_named(swncq, swncq_enabled, bool, 0444); +MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); +module_param_named(msi, msi_enabled, bool, 0444); +MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index d627812ea73..3638887476f 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -1,7 +1,8 @@ /* * sata_promise.c - Promise SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> + * Mikael Pettersson * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* @@ -32,54 +33,113 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/sched.h> #include <linux/device.h> +#include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> -#include <asm/io.h> #include "sata_promise.h" #define DRV_NAME "sata_promise" -#define DRV_VERSION "1.04" - +#define DRV_VERSION "2.12" enum { - PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ + PDC_MAX_PORTS = 4, + PDC_MMIO_BAR = 3, + PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */ + + /* host register offsets (from host->iomap[PDC_MMIO_BAR]) */ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ - PDC_TBG_MODE = 0x41, /* TBG mode */ PDC_FLASH_CTL = 0x44, /* Flash control register */ - PDC_PCI_CTL = 0x48, /* PCI control and status register */ - PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ - PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ + PDC_PCI_CTL = 0x48, /* PCI control/status reg */ PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */ - PDC_SLEW_CTL = 0x470, /* slew rate control reg */ + PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */ + PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */ + + /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */ + PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */ + PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */ + PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */ + PDC_CYLINDER_LOW = 0x10, /* Cylinder low reg (per port) */ + PDC_CYLINDER_HIGH = 0x14, /* Cylinder high reg (per port) */ + PDC_DEVICE = 0x18, /* Device/Head reg (per port) */ + PDC_COMMAND = 0x1C, /* Command/status reg (per port) */ + PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */ + PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ + PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ + PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ - PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | - (1<<8) | (1<<9) | (1<<10), + /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */ + PDC_SATA_ERROR = 0x04, + PDC_PHYMODE4 = 0x14, + PDC_LINK_LAYER_ERRORS = 0x6C, + PDC_FPDMA_CTLSTAT = 0xD8, + PDC_INTERNAL_DEBUG_1 = 0xF8, /* also used for PATA */ + PDC_INTERNAL_DEBUG_2 = 0xFC, /* also used for PATA */ + + /* PDC_FPDMA_CTLSTAT bit definitions */ + PDC_FPDMA_CTLSTAT_RESET = 1 << 3, + PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG = 1 << 10, + PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG = 1 << 11, + + /* PDC_GLOBAL_CTL bit definitions */ + PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */ + PDC_SH_ERR = (1 << 9), /* PCI error while loading S/G table */ + PDC_DH_ERR = (1 << 10), /* PCI error while loading data */ + PDC2_HTO_ERR = (1 << 12), /* host bus timeout */ + PDC2_ATA_HBA_ERR = (1 << 13), /* error during SATA DATA FIS transmission */ + PDC2_ATA_DMA_CNT_ERR = (1 << 14), /* DMA DATA FIS size differs from S/G count */ + PDC_OVERRUN_ERR = (1 << 19), /* S/G byte count larger than HD requires */ + PDC_UNDERRUN_ERR = (1 << 20), /* S/G byte count less than HD requires */ + PDC_DRIVE_ERR = (1 << 21), /* drive error */ + PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */ + PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */ + PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR, + 
PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | + PDC2_ATA_DMA_CNT_ERR, + PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | + PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR | + PDC_DRIVE_ERR | PDC_PCI_SYS_ERR | + PDC1_ERR_MASK | PDC2_ERR_MASK, board_2037x = 0, /* FastTrak S150 TX2plus */ - board_20319 = 1, /* FastTrak S150 TX4 */ - board_20619 = 2, /* FastTrak TX4000 */ - board_20771 = 3, /* FastTrak TX2300 */ + board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */ + board_20319 = 2, /* FastTrak S150 TX4 */ + board_20619 = 3, /* FastTrak TX4000 */ board_2057x = 4, /* SATAII150 Tx2plus */ - board_40518 = 5, /* SATAII150 Tx4 */ + board_2057x_pata = 5, /* SATAII150 Tx2plus PATA port */ + board_40518 = 6, /* SATAII150 Tx4 */ PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */ + /* Sequence counter control registers bit definitions */ + PDC_SEQCNTRL_INT_MASK = (1 << 5), /* Sequence Interrupt Mask */ + + /* Feature register values */ + PDC_FEATURE_ATAPI_PIO = 0x00, /* ATAPI data xfer by PIO */ + PDC_FEATURE_ATAPI_DMA = 0x01, /* ATAPI data xfer by DMA */ + + /* Device/Head register values */ + PDC_DEVICE_SATA = 0xE0, /* Device/Head value for SATA devices */ + + /* PDC_CTLSTAT bit definitions */ + PDC_DMA_ENABLE = (1 << 7), + PDC_IRQ_DISABLE = (1 << 10), PDC_RESET = (1 << 11), /* HDMA reset */ - PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | - ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | - ATA_FLAG_PIO_POLLING, -}; + PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING, + /* ap->flags bits */ + PDC_FLAG_GEN_II = (1 << 24), + PDC_FLAG_SATA_PATA = (1 << 25), /* supports SATA + PATA */ + PDC_FLAG_4_PORTS = (1 << 26), /* 4 ports */ +}; struct pdc_port_priv { u8 *pkt; @@ -87,201 +147,180 @@ struct pdc_port_priv { }; struct pdc_host_priv { - int hotplug_offset; + spinlock_t hard_reset_lock; }; -static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); -static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs); -static void pdc_eng_timeout(struct ata_port *ap); -static int pdc_port_start(struct ata_port *ap); -static void pdc_port_stop(struct ata_port *ap); -static void pdc_pata_phy_reset(struct ata_port *ap); -static void pdc_sata_phy_reset(struct ata_port *ap); +static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int pdc_common_port_start(struct ata_port *ap); +static int pdc_sata_port_start(struct ata_port *ap); static void pdc_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); +static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); +static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc); static void pdc_irq_clear(struct ata_port *ap); -static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); -static void pdc_host_stop(struct ata_host *host); - +static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc); +static void pdc_freeze(struct ata_port *ap); +static void pdc_sata_freeze(struct ata_port *ap); +static void pdc_thaw(struct ata_port *ap); +static void pdc_sata_thaw(struct ata_port *ap); +static int 
pdc_pata_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void pdc_error_handler(struct ata_port *ap); +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); +static int pdc_pata_cable_detect(struct ata_port *ap); +static int pdc_sata_cable_detect(struct ata_port *ap); static struct scsi_host_template pdc_ata_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = PDC_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations pdc_sata_ops = { - .port_disable = ata_port_disable, - .tf_load = pdc_tf_load_mmio, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = pdc_exec_command_mmio, - .dev_select = ata_std_dev_select, - - .phy_reset = pdc_sata_phy_reset, +static const struct ata_port_operations pdc_common_ops = { + .inherits = &ata_sff_port_ops, + .sff_tf_load = pdc_tf_load_mmio, + .sff_exec_command = pdc_exec_command_mmio, + .check_atapi_dma = pdc_check_atapi_dma, .qc_prep = pdc_qc_prep, - .qc_issue = pdc_qc_issue_prot, - .eng_timeout = pdc_eng_timeout, - .data_xfer = ata_mmio_data_xfer, - .irq_handler = pdc_interrupt, - .irq_clear = pdc_irq_clear, + .qc_issue = pdc_qc_issue, + + .sff_irq_clear = pdc_irq_clear, + .lost_interrupt = ATA_OP_NULL, + .post_internal_cmd = pdc_post_internal_cmd, + .error_handler = pdc_error_handler, +}; + +static struct ata_port_operations pdc_sata_ops = { + .inherits = &pdc_common_ops, + .cable_detect = pdc_sata_cable_detect, + .freeze = pdc_sata_freeze, + .thaw = pdc_sata_thaw, .scr_read = pdc_sata_scr_read, .scr_write = pdc_sata_scr_write, - .port_start = pdc_port_start, - .port_stop = pdc_port_stop, - .host_stop = pdc_host_stop, + .port_start = pdc_sata_port_start, + .hardreset = pdc_sata_hardreset, }; -static const struct ata_port_operations pdc_pata_ops = { - .port_disable = ata_port_disable, - .tf_load = pdc_tf_load_mmio, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = pdc_exec_command_mmio, - .dev_select = ata_std_dev_select, - - .phy_reset = pdc_pata_phy_reset, +/* First-generation chips need a more restrictive ->check_atapi_dma op, + and ->freeze/thaw that ignore the hotplug controls. 
*/ +static struct ata_port_operations pdc_old_sata_ops = { + .inherits = &pdc_sata_ops, + .freeze = pdc_freeze, + .thaw = pdc_thaw, + .check_atapi_dma = pdc_old_sata_check_atapi_dma, +}; - .qc_prep = pdc_qc_prep, - .qc_issue = pdc_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, - .eng_timeout = pdc_eng_timeout, - .irq_handler = pdc_interrupt, - .irq_clear = pdc_irq_clear, - - .port_start = pdc_port_start, - .port_stop = pdc_port_stop, - .host_stop = pdc_host_stop, +static struct ata_port_operations pdc_pata_ops = { + .inherits = &pdc_common_ops, + .cable_detect = pdc_pata_cable_detect, + .freeze = pdc_freeze, + .thaw = pdc_thaw, + .port_start = pdc_common_port_start, + .softreset = pdc_pata_softreset, }; static const struct ata_port_info pdc_port_info[] = { - /* board_2037x */ + [board_2037x] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ - .port_ops = &pdc_sata_ops, + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA | + PDC_FLAG_SATA_PATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &pdc_old_sata_ops, }, - /* board_20319 */ + [board_2037x_pata] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ - .port_ops = &pdc_sata_ops, + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &pdc_pata_ops, }, - /* board_20619 */ + [board_20319] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA | + PDC_FLAG_4_PORTS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &pdc_old_sata_ops, + }, + + [board_20619] = + { + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS | + PDC_FLAG_4_PORTS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &pdc_pata_ops, }, - /* board_20771 */ + [board_2057x] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA | + PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &pdc_sata_ops, }, - /* board_2057x */ + [board_2057x_pata] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ - .port_ops = &pdc_sata_ops, + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS | + PDC_FLAG_GEN_II, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &pdc_pata_ops, }, - /* board_40518 */ + [board_40518] = { - .sht = &pdc_ata_sht, - .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA | + PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &pdc_sata_ops, }, }; static const struct pci_device_id pdc_ata_pci_tbl[] = { 
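The ID table below relies on the PCI_VDEVICE() shorthand, which fills in the vendor/device pair and wildcards the subsystem IDs. For reference, a single entry such as { PCI_VDEVICE(PROMISE, 0x3371), board_2037x } is equivalent (up to field order) to the long-form initializer the old table spelled out by hand:

static const struct pci_device_id pdc_example_entry = {
	.vendor      = PCI_VENDOR_ID_PROMISE,
	.device      = 0x3371,
	.subvendor   = PCI_ANY_ID,	/* match any subsystem vendor */
	.subdevice   = PCI_ANY_ID,	/* match any subsystem device */
	.driver_data = board_2037x,	/* index into pdc_port_info[] */
};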
- { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2057x }, - { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2057x }, - { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2037x }, - - { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20319 }, - { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20319 }, - { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20319 }, - { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20319 }, - { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20319 }, - { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_40518 }, - - { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20619 }, - -/* TODO: remove all associated board_20771 code, as it completely - * duplicates board_2037x code, unless reason for separation can be - * divined. - */ -#if 0 - { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20771 }, -#endif + { PCI_VDEVICE(PROMISE, 0x3371), board_2037x }, + { PCI_VDEVICE(PROMISE, 0x3373), board_2037x }, + { PCI_VDEVICE(PROMISE, 0x3375), board_2037x }, + { PCI_VDEVICE(PROMISE, 0x3376), board_2037x }, + { PCI_VDEVICE(PROMISE, 0x3570), board_2057x }, + { PCI_VDEVICE(PROMISE, 0x3571), board_2057x }, + { PCI_VDEVICE(PROMISE, 0x3574), board_2057x }, + { PCI_VDEVICE(PROMISE, 0x3577), board_2057x }, + { PCI_VDEVICE(PROMISE, 0x3d73), board_2057x }, + { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x }, + + { PCI_VDEVICE(PROMISE, 0x3318), board_20319 }, + { PCI_VDEVICE(PROMISE, 0x3319), board_20319 }, + { PCI_VDEVICE(PROMISE, 0x3515), board_40518 }, + { PCI_VDEVICE(PROMISE, 0x3519), board_40518 }, + { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 }, + { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 }, + + { PCI_VDEVICE(PROMISE, 0x6629), board_20619 }, { } /* terminate list */ }; - static struct pci_driver pdc_ata_pci_driver = { .name = DRV_NAME, .id_table = pdc_ata_pci_tbl, @@ -289,127 +328,325 @@ static struct pci_driver pdc_ata_pci_driver = { .remove = ata_pci_remove_one, }; - -static int pdc_port_start(struct ata_port *ap) +static int pdc_common_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct pdc_port_priv *pp; int rc; - rc = ata_port_start(ap); + /* we use the same prd table as bmdma, allocate it */ + rc = ata_bmdma_port_start(ap); if (rc) return rc; - pp = kzalloc(sizeof(*pp), GFP_KERNEL); - if (!pp) { - rc = -ENOMEM; - goto err_out; - } + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; - pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); - if (!pp->pkt) { - rc = -ENOMEM; - goto err_out_kfree; - } + pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); + if (!pp->pkt) + return -ENOMEM; ap->private_data = pp; return 0; - -err_out_kfree: - kfree(pp); -err_out: - ata_port_stop(ap); - return rc; } - -static void pdc_port_stop(struct ata_port *ap) +static int pdc_sata_port_start(struct 
ata_port *ap) { - struct device *dev = ap->host->dev; - struct pdc_port_priv *pp = ap->private_data; + int rc; - ap->private_data = NULL; - dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma); - kfree(pp); - ata_port_stop(ap); + rc = pdc_common_port_start(ap); + if (rc) + return rc; + + /* fix up PHYMODE4 align timing */ + if (ap->flags & PDC_FLAG_GEN_II) { + void __iomem *sata_mmio = ap->ioaddr.scr_addr; + unsigned int tmp; + + tmp = readl(sata_mmio + PDC_PHYMODE4); + tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */ + writel(tmp, sata_mmio + PDC_PHYMODE4); + } + + return 0; } +static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap) +{ + void __iomem *sata_mmio = ap->ioaddr.scr_addr; + u32 tmp; -static void pdc_host_stop(struct ata_host *host) + tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT); + tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG; + tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG; + + /* It's not allowed to write to the entire FPDMA_CTLSTAT register + when NCQ is running. So do a byte-sized write to bits 10 and 11. */ + writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1); + readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */ +} + +static void pdc_fpdma_reset(struct ata_port *ap) { - struct pdc_host_priv *hp = host->private_data; + void __iomem *sata_mmio = ap->ioaddr.scr_addr; + u8 tmp; - ata_pci_host_stop(host); + tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT); + tmp &= 0x7F; + tmp |= PDC_FPDMA_CTLSTAT_RESET; + writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT); + readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */ + udelay(100); + tmp &= ~PDC_FPDMA_CTLSTAT_RESET; + writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT); + readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */ + + pdc_fpdma_clear_interrupt_flag(ap); +} + +static void pdc_not_at_command_packet_phase(struct ata_port *ap) +{ + void __iomem *sata_mmio = ap->ioaddr.scr_addr; + unsigned int i; + u32 tmp; - kfree(hp); + /* check not at ASIC packet command phase */ + for (i = 0; i < 100; ++i) { + writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1); + tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2); + if ((tmp & 0xF) != 1) + break; + udelay(100); + } } +static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap) +{ + void __iomem *sata_mmio = ap->ioaddr.scr_addr; + + writel(0xffffffff, sata_mmio + PDC_SATA_ERROR); + writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS); +} static void pdc_reset_port(struct ata_port *ap) { - void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; + void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; unsigned int i; u32 tmp; + if (ap->flags & PDC_FLAG_GEN_II) + pdc_not_at_command_packet_phase(ap); + + tmp = readl(ata_ctlstat_mmio); + tmp |= PDC_RESET; + writel(tmp, ata_ctlstat_mmio); + for (i = 11; i > 0; i--) { - tmp = readl(mmio); + tmp = readl(ata_ctlstat_mmio); if (tmp & PDC_RESET) break; udelay(100); tmp |= PDC_RESET; - writel(tmp, mmio); + writel(tmp, ata_ctlstat_mmio); } tmp &= ~PDC_RESET; - writel(tmp, mmio); - readl(mmio); /* flush */ -} + writel(tmp, ata_ctlstat_mmio); + readl(ata_ctlstat_mmio); /* flush */ -static void pdc_sata_phy_reset(struct ata_port *ap) -{ - pdc_reset_port(ap); - sata_phy_reset(ap); + if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) { + pdc_fpdma_reset(ap); + pdc_clear_internal_debug_record_error_register(ap); + } } -static void pdc_pata_cbl_detect(struct ata_port *ap) +static int pdc_pata_cable_detect(struct ata_port *ap) { u8 tmp; - void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03; + void __iomem *ata_mmio = 
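pdc_fpdma_clear_interrupt_flag() above must not rewrite the whole 32-bit FPDMA_CTLSTAT register while NCQ is running, so it writes only the byte holding bits 8-15: writeb(tmp >> 8, reg + 1) lands exactly on the two interrupt flags at bits 10 and 11. A small userspace sketch of why that works, assuming the little-endian register layout this hardware uses:

#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
	/* simulate a little-endian 32-bit register in memory */
	uint32_t reg = 0x00000040;
	uint32_t tmp = reg;
	uint8_t b;

	tmp |= 1u << 10;	/* DMASETUP_INT_FLAG */
	tmp |= 1u << 11;	/* SETDB_INT_FLAG */

	/* a byte write at offset 1 updates only bits 8..15 */
	b = (uint8_t)(tmp >> 8);
	memcpy((uint8_t *)&reg + 1, &b, 1);

	assert(reg == (0x00000040 | (1u << 10) | (1u << 11)));
	return 0;
}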
ap->ioaddr.cmd_addr; - tmp = readb(mmio); + tmp = readb(ata_mmio + PDC_CTLSTAT + 3); + if (tmp & 0x01) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} - if (tmp & 0x01) { - ap->cbl = ATA_CBL_PATA40; - ap->udma_mask &= ATA_UDMA_MASK_40C; - } else - ap->cbl = ATA_CBL_PATA80; +static int pdc_sata_cable_detect(struct ata_port *ap) +{ + return ATA_CBL_SATA; } -static void pdc_pata_phy_reset(struct ata_port *ap) +static int pdc_sata_scr_read(struct ata_link *link, + unsigned int sc_reg, u32 *val) { - pdc_pata_cbl_detect(ap); - pdc_reset_port(ap); - ata_port_probe(ap); - ata_bus_reset(ap); + if (sc_reg > SCR_CONTROL) + return -EINVAL; + *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int pdc_sata_scr_write(struct ata_link *link, + unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } +static void pdc_atapi_pkt(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + dma_addr_t sg_table = ap->bmdma_prd_dma; + unsigned int cdb_len = qc->dev->cdb_len; + u8 *cdb = qc->cdb; + struct pdc_port_priv *pp = ap->private_data; + u8 *buf = pp->pkt; + __le32 *buf32 = (__le32 *) buf; + unsigned int dev_sel, feature; -static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) + /* set control bits (byte 0), zero delay seq id (byte 3), + * and seq id (byte 2) + */ + switch (qc->tf.protocol) { + case ATAPI_PROT_DMA: + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) + buf32[0] = cpu_to_le32(PDC_PKT_READ); + else + buf32[0] = 0; + break; + case ATAPI_PROT_NODATA: + buf32[0] = cpu_to_le32(PDC_PKT_NODATA); + break; + default: + BUG(); + break; + } + buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ + buf32[2] = 0; /* no next-packet */ + + /* select drive */ + if (sata_scr_valid(&ap->link)) + dev_sel = PDC_DEVICE_SATA; + else + dev_sel = qc->tf.device; + + buf[12] = (1 << 5) | ATA_REG_DEVICE; + buf[13] = dev_sel; + buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY; + buf[15] = dev_sel; /* once more, waiting for BSY to clear */ + + buf[16] = (1 << 5) | ATA_REG_NSECT; + buf[17] = qc->tf.nsect; + buf[18] = (1 << 5) | ATA_REG_LBAL; + buf[19] = qc->tf.lbal; + + /* set feature and byte counter registers */ + if (qc->tf.protocol != ATAPI_PROT_DMA) + feature = PDC_FEATURE_ATAPI_PIO; + else + feature = PDC_FEATURE_ATAPI_DMA; + + buf[20] = (1 << 5) | ATA_REG_FEATURE; + buf[21] = feature; + buf[22] = (1 << 5) | ATA_REG_BYTEL; + buf[23] = qc->tf.lbam; + buf[24] = (1 << 5) | ATA_REG_BYTEH; + buf[25] = qc->tf.lbah; + + /* send ATAPI packet command 0xA0 */ + buf[26] = (1 << 5) | ATA_REG_CMD; + buf[27] = qc->tf.command; + + /* select drive and check DRQ */ + buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY; + buf[29] = dev_sel; + + /* we can represent cdb lengths 2/4/6/8/10/12/14/16 */ + BUG_ON(cdb_len & ~0x1E); + + /* append the CDB as the final part */ + buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG; + memcpy(buf+31, cdb, cdb_len); +} + +/** + * pdc_fill_sg - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. + * Make sure hardware does not choke on it. 
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void pdc_fill_sg(struct ata_queued_cmd *qc) { - if (sc_reg > SCR_CONTROL) + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + const u32 SG_COUNT_ASIC_BUG = 41*4; + unsigned int si, idx; + u32 len; + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return; - writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); + + idx = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + prd[idx].addr = cpu_to_le32(addr); + prd[idx].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + + idx++; + sg_len -= len; + addr += len; + } + } + + len = le32_to_cpu(prd[idx - 1].flags_len); + + if (len > SG_COUNT_ASIC_BUG) { + u32 addr; + + VPRINTK("Splitting last PRD.\n"); + + addr = le32_to_cpu(prd[idx - 1].addr); + prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); + + addr = addr + len - SG_COUNT_ASIC_BUG; + len = SG_COUNT_ASIC_BUG; + prd[idx].addr = cpu_to_le32(addr); + prd[idx].flags_len = cpu_to_le32(len); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + + idx++; + } + + prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); } static void pdc_qc_prep(struct ata_queued_cmd *qc) @@ -421,145 +658,352 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: - ata_qc_prep(qc); - /* fall through */ - + pdc_fill_sg(qc); + /*FALLTHROUGH*/ case ATA_PROT_NODATA: - i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, + i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma, qc->dev->devno, pp->pkt); - if (qc->tf.flags & ATA_TFLAG_LBA48) i = pdc_prep_lba48(&qc->tf, pp->pkt, i); else i = pdc_prep_lba28(&qc->tf, pp->pkt, i); - pdc_pkt_footer(&qc->tf, pp->pkt, i); break; - + case ATAPI_PROT_PIO: + pdc_fill_sg(qc); + break; + case ATAPI_PROT_DMA: + pdc_fill_sg(qc); + /*FALLTHROUGH*/ + case ATAPI_PROT_NODATA: + pdc_atapi_pkt(qc); + break; default: break; } } -static void pdc_eng_timeout(struct ata_port *ap) +static int pdc_is_sataii_tx4(unsigned long flags) { - struct ata_host *host = ap->host; - u8 drv_stat; - struct ata_queued_cmd *qc; - unsigned long flags; + const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS; + return (flags & mask) == mask; +} - DPRINTK("ENTER\n"); +static unsigned int pdc_port_no_to_ata_no(unsigned int port_no, + int is_sataii_tx4) +{ + static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2}; + return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no; +} - spin_lock_irqsave(&host->lock, flags); +static unsigned int pdc_sata_nr_ports(const struct ata_port *ap) +{ + return (ap->flags & PDC_FLAG_4_PORTS) ? 
4 : 2; +} - qc = ata_qc_from_tag(ap, ap->active_tag); +static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap) +{ + const struct ata_host *host = ap->host; + unsigned int nr_ports = pdc_sata_nr_ports(ap); + unsigned int i; - switch (qc->tf.protocol) { - case ATA_PROT_DMA: - case ATA_PROT_NODATA: - ata_port_printk(ap, KERN_ERR, "command timeout\n"); - drv_stat = ata_wait_idle(ap); - qc->err_mask |= __ac_err_mask(drv_stat); - break; + for (i = 0; i < nr_ports && host->ports[i] != ap; ++i) + ; + BUG_ON(i >= nr_ports); + return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); +} - default: - drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); +static void pdc_freeze(struct ata_port *ap) +{ + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; + u32 tmp; - ata_port_printk(ap, KERN_ERR, - "unknown timeout, cmd 0x%x stat 0x%x\n", - qc->tf.command, drv_stat); + tmp = readl(ata_mmio + PDC_CTLSTAT); + tmp |= PDC_IRQ_DISABLE; + tmp &= ~PDC_DMA_ENABLE; + writel(tmp, ata_mmio + PDC_CTLSTAT); + readl(ata_mmio + PDC_CTLSTAT); /* flush */ +} - qc->err_mask |= ac_err_mask(drv_stat); - break; - } +static void pdc_sata_freeze(struct ata_port *ap) +{ + struct ata_host *host = ap->host; + void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; + unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR; + unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); + u32 hotplug_status; + + /* Disable hotplug events on this port. + * + * Locking: + * 1) hotplug register accesses must be serialised via host->lock + * 2) ap->lock == &ap->host->lock + * 3) ->freeze() and ->thaw() are called with ap->lock held + */ + hotplug_status = readl(host_mmio + hotplug_offset); + hotplug_status |= 0x11 << (ata_no + 16); + writel(hotplug_status, host_mmio + hotplug_offset); + readl(host_mmio + hotplug_offset); /* flush */ - spin_unlock_irqrestore(&host->lock, flags); - ata_eh_qc_complete(qc); - DPRINTK("EXIT\n"); + pdc_freeze(ap); } -static inline unsigned int pdc_host_intr( struct ata_port *ap, - struct ata_queued_cmd *qc) +static void pdc_thaw(struct ata_port *ap) { - unsigned int handled = 0; + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; u32 tmp; - void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; - tmp = readl(mmio); - if (tmp & PDC_ERR_MASK) { - qc->err_mask |= AC_ERR_DEV; + /* clear IRQ */ + readl(ata_mmio + PDC_COMMAND); + + /* turn IRQ back on */ + tmp = readl(ata_mmio + PDC_CTLSTAT); + tmp &= ~PDC_IRQ_DISABLE; + writel(tmp, ata_mmio + PDC_CTLSTAT); + readl(ata_mmio + PDC_CTLSTAT); /* flush */ +} + +static void pdc_sata_thaw(struct ata_port *ap) +{ + struct ata_host *host = ap->host; + void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; + unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR; + unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap); + u32 hotplug_status; + + pdc_thaw(ap); + + /* Enable hotplug events on this port. + * Locking: see pdc_sata_freeze(). 
+ */ + hotplug_status = readl(host_mmio + hotplug_offset); + hotplug_status |= 0x11 << ata_no; + hotplug_status &= ~(0x11 << (ata_no + 16)); + writel(hotplug_status, host_mmio + hotplug_offset); + readl(host_mmio + hotplug_offset); /* flush */ +} + +static int pdc_pata_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + pdc_reset_port(link->ap); + return ata_sff_softreset(link, class, deadline); +} + +static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap) +{ + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; + void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; + + /* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */ + return (ata_mmio - host_mmio - 0x200) / 0x80; +} + +static void pdc_hard_reset_port(struct ata_port *ap) +{ + void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; + void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1; + unsigned int ata_no = pdc_ata_port_to_ata_no(ap); + struct pdc_host_priv *hpriv = ap->host->private_data; + u8 tmp; + + spin_lock(&hpriv->hard_reset_lock); + + tmp = readb(pcictl_b1_mmio); + tmp &= ~(0x10 << ata_no); + writeb(tmp, pcictl_b1_mmio); + readb(pcictl_b1_mmio); /* flush */ + udelay(100); + tmp |= (0x10 << ata_no); + writeb(tmp, pcictl_b1_mmio); + readb(pcictl_b1_mmio); /* flush */ + + spin_unlock(&hpriv->hard_reset_lock); +} + +static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + if (link->ap->flags & PDC_FLAG_GEN_II) + pdc_not_at_command_packet_phase(link->ap); + /* hotplug IRQs should have been masked by pdc_sata_freeze() */ + pdc_hard_reset_port(link->ap); + pdc_reset_port(link->ap); + + /* sata_promise can't reliably acquire the first D2H Reg FIS + * after hardreset. Do non-waiting hardreset and request + * follow-up SRST. 
+ */ + return sata_std_hardreset(link, class, deadline); +} + +static void pdc_error_handler(struct ata_port *ap) +{ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + pdc_reset_port(ap); + + ata_sff_error_handler(ap); +} + +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) pdc_reset_port(ap); +} + +static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 port_status, u32 err_mask) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + unsigned int ac_err_mask = 0; + + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status); + port_status &= err_mask; + + if (port_status & PDC_DRIVE_ERR) + ac_err_mask |= AC_ERR_DEV; + if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR)) + ac_err_mask |= AC_ERR_OTHER; + if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR)) + ac_err_mask |= AC_ERR_ATA_BUS; + if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR + | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) + ac_err_mask |= AC_ERR_HOST_BUS; + + if (sata_scr_valid(&ap->link)) { + u32 serror; + + pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror); + ehi->serror |= serror; + } + + qc->err_mask |= ac_err_mask; + + pdc_reset_port(ap); + + ata_port_abort(ap); +} + +static unsigned int pdc_host_intr(struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + unsigned int handled = 0; + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; + u32 port_status, err_mask; + + err_mask = PDC_ERR_MASK; + if (ap->flags & PDC_FLAG_GEN_II) + err_mask &= ~PDC1_ERR_MASK; + else + err_mask &= ~PDC2_ERR_MASK; + port_status = readl(ata_mmio + PDC_GLOBAL_CTL); + if (unlikely(port_status & err_mask)) { + pdc_error_intr(ap, qc, port_status, err_mask); + return 1; } switch (qc->tf.protocol) { case ATA_PROT_DMA: case ATA_PROT_NODATA: + case ATAPI_PROT_DMA: + case ATAPI_PROT_NODATA: qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); ata_qc_complete(qc); handled = 1; break; - - default: + default: ap->stats.idle_irq++; break; - } + } return handled; } static void pdc_irq_clear(struct ata_port *ap) { - struct ata_host *host = ap->host; - void __iomem *mmio = host->mmio_base; + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; - readl(mmio + PDC_INT_SEQMASK); + readl(ata_mmio + PDC_COMMAND); } -static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +static irqreturn_t pdc_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct ata_port *ap; u32 mask = 0; unsigned int i, tmp; unsigned int handled = 0; - void __iomem *mmio_base; + void __iomem *host_mmio; + unsigned int hotplug_offset, ata_no; + u32 hotplug_status; + int is_sataii_tx4; VPRINTK("ENTER\n"); - if (!host || !host->mmio_base) { + if (!host || !host->iomap[PDC_MMIO_BAR]) { VPRINTK("QUICK EXIT\n"); return IRQ_NONE; } - mmio_base = host->mmio_base; + host_mmio = host->iomap[PDC_MMIO_BAR]; + + spin_lock(&host->lock); + + /* read and clear hotplug flags for all ports */ + if (host->ports[0]->flags & PDC_FLAG_GEN_II) { + hotplug_offset = PDC2_SATA_PLUG_CSR; + hotplug_status = readl(host_mmio + hotplug_offset); + if (hotplug_status & 0xff) + writel(hotplug_status | 0xff, host_mmio + hotplug_offset); + hotplug_status &= 0xff; /* clear uninteresting bits */ + } else + hotplug_status = 0; /* reading should also clear interrupts */ - mask = readl(mmio_base + PDC_INT_SEQMASK); + mask = readl(host_mmio + PDC_INT_SEQMASK); - if (mask == 0xffffffff) { + 
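The 0x11 shifts used by pdc_sata_freeze()/pdc_sata_thaw() above and by the hotplug scan in pdc_interrupt() encode, for ATA port n, a plug bit at position n and an unplug bit at position n + 4 in the plug CSR, with the matching interrupt-mask bits sixteen positions higher. A short sketch of the resulting masks; this layout is inferred from the driver code, not from a datasheet:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int ata_no = 0; ata_no < 4; ata_no++) {
		uint32_t events = 0x11u << ata_no;	  /* plug | unplug */
		uint32_t masks = 0x11u << (ata_no + 16);  /* their mask bits */

		printf("ATA port %u: event bits 0x%08x, mask bits 0x%08x\n",
		       ata_no, (unsigned)events, (unsigned)masks);
	}
	return 0;
}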
if (mask == 0xffffffff && hotplug_status == 0) { VPRINTK("QUICK EXIT 2\n"); - return IRQ_NONE; + goto done_irq; } - spin_lock(&host->lock); - - mask &= 0xffff; /* only 16 tags possible */ - if (!mask) { + mask &= 0xffff; /* only 16 SEQIDs possible */ + if (mask == 0 && hotplug_status == 0) { VPRINTK("QUICK EXIT 3\n"); goto done_irq; } - writel(mask, mmio_base + PDC_INT_SEQMASK); + writel(mask, host_mmio + PDC_INT_SEQMASK); + + is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); for (i = 0; i < host->n_ports; i++) { VPRINTK("port %u\n", i); ap = host->ports[i]; + + /* check for a plug or unplug event */ + ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); + tmp = hotplug_status & (0x11 << ata_no); + if (tmp) { + struct ata_eh_info *ehi = &ap->link.eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp); + ata_port_freeze(ap); + ++handled; + continue; + } + + /* check for a packet interrupt */ tmp = mask & (1 << (i + 1)); - if (tmp && ap && - !(ap->flags & ATA_FLAG_DISABLED)) { + if (tmp) { struct ata_queued_cmd *qc; - qc = ata_qc_from_tag(ap, ap->active_tag); + qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += pdc_host_intr(ap, qc); } @@ -572,273 +1016,253 @@ done_irq: return IRQ_RETVAL(handled); } -static inline void pdc_packet_start(struct ata_queued_cmd *qc) +static void pdc_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; + void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; + void __iomem *ata_mmio = ap->ioaddr.cmd_addr; unsigned int port_no = ap->port_no; u8 seq = (u8) (port_no + 1); VPRINTK("ENTER, ap %p\n", ap); - writel(0x00000001, ap->host->mmio_base + (seq * 4)); - readl(ap->host->mmio_base + (seq * 4)); /* flush */ + writel(0x00000001, host_mmio + (seq * 4)); + readl(host_mmio + (seq * 4)); /* flush */ pp->pkt[2] = seq; wmb(); /* flush PRD, pkt writes */ - writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ + writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT); + readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */ } -static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { - case ATA_PROT_DMA: + case ATAPI_PROT_NODATA: + if (qc->dev->flags & ATA_DFLAG_CDB_INTR) + break; + /*FALLTHROUGH*/ case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + break; + /*FALLTHROUGH*/ + case ATAPI_PROT_DMA: + case ATA_PROT_DMA: pdc_packet_start(qc); return 0; - - case ATA_PROT_ATAPI_DMA: - BUG(); - break; - default: break; } - - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { - WARN_ON (tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); - ata_tf_load(ap, tf); + WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); + ata_sff_tf_load(ap, tf); } - -static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) +static void pdc_exec_command_mmio(struct ata_port *ap, + const struct ata_taskfile *tf) { - WARN_ON (tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); - ata_exec_command(ap, tf); + WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); + ata_sff_exec_command(ap, tf); } +static int pdc_check_atapi_dma(struct ata_queued_cmd 
*qc) +{ + u8 *scsicmd = qc->scsicmd->cmnd; + int pio = 1; /* atapi dma off by default */ + + /* Whitelist commands that may use DMA. */ + switch (scsicmd[0]) { + case WRITE_12: + case WRITE_10: + case WRITE_6: + case READ_12: + case READ_10: + case READ_6: + case 0xad: /* READ_DVD_STRUCTURE */ + case 0xbe: /* READ_CD */ + pio = 0; + } + /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ + if (scsicmd[0] == WRITE_10) { + unsigned int lba = + (scsicmd[2] << 24) | + (scsicmd[3] << 16) | + (scsicmd[4] << 8) | + scsicmd[5]; + if (lba >= 0xFFFF4FA2) + pio = 1; + } + return pio; +} -static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) +static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc) { - port->cmd_addr = base; - port->data_addr = base; - port->feature_addr = - port->error_addr = base + 0x4; - port->nsect_addr = base + 0x8; - port->lbal_addr = base + 0xc; - port->lbam_addr = base + 0x10; - port->lbah_addr = base + 0x14; - port->device_addr = base + 0x18; - port->command_addr = - port->status_addr = base + 0x1c; - port->altstatus_addr = - port->ctl_addr = base + 0x38; + /* First generation chips cannot use ATAPI DMA on SATA ports */ + return 1; } +static void pdc_ata_setup_port(struct ata_port *ap, + void __iomem *base, void __iomem *scr_addr) +{ + ap->ioaddr.cmd_addr = base; + ap->ioaddr.data_addr = base; + ap->ioaddr.feature_addr = + ap->ioaddr.error_addr = base + 0x4; + ap->ioaddr.nsect_addr = base + 0x8; + ap->ioaddr.lbal_addr = base + 0xc; + ap->ioaddr.lbam_addr = base + 0x10; + ap->ioaddr.lbah_addr = base + 0x14; + ap->ioaddr.device_addr = base + 0x18; + ap->ioaddr.command_addr = + ap->ioaddr.status_addr = base + 0x1c; + ap->ioaddr.altstatus_addr = + ap->ioaddr.ctl_addr = base + 0x38; + ap->ioaddr.scr_addr = scr_addr; +} -static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) +static void pdc_host_init(struct ata_host *host) { - void __iomem *mmio = pe->mmio_base; - struct pdc_host_priv *hp = pe->private_data; - int hotplug_offset = hp->hotplug_offset; + void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; + int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II; + int hotplug_offset; u32 tmp; + if (is_gen2) + hotplug_offset = PDC2_SATA_PLUG_CSR; + else + hotplug_offset = PDC_SATA_PLUG_CSR; + /* * Except for the hotplug stuff, this is voodoo from the * Promise driver. Label this entire section * "TODO: figure out why we do this" */ - /* change FIFO_SHD to 8 dwords, enable BMR_BURST */ - tmp = readl(mmio + PDC_FLASH_CTL); - tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) 
*/ - writel(tmp, mmio + PDC_FLASH_CTL); + /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */ + tmp = readl(host_mmio + PDC_FLASH_CTL); + tmp |= 0x02000; /* bit 13 (enable bmr burst) */ + if (!is_gen2) + tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */ + writel(tmp, host_mmio + PDC_FLASH_CTL); /* clear plug/unplug flags for all ports */ - tmp = readl(mmio + hotplug_offset); - writel(tmp | 0xff, mmio + hotplug_offset); + tmp = readl(host_mmio + hotplug_offset); + writel(tmp | 0xff, host_mmio + hotplug_offset); - /* mask plug/unplug ints */ - tmp = readl(mmio + hotplug_offset); - writel(tmp | 0xff0000, mmio + hotplug_offset); + tmp = readl(host_mmio + hotplug_offset); + if (is_gen2) /* unmask plug/unplug ints */ + writel(tmp & ~0xff0000, host_mmio + hotplug_offset); + else /* mask plug/unplug ints */ + writel(tmp | 0xff0000, host_mmio + hotplug_offset); + + /* don't initialise TBG or SLEW on 2nd generation chips */ + if (is_gen2) + return; /* reduce TBG clock to 133 Mhz. */ - tmp = readl(mmio + PDC_TBG_MODE); + tmp = readl(host_mmio + PDC_TBG_MODE); tmp &= ~0x30000; /* clear bit 17, 16*/ tmp |= 0x10000; /* set bit 17:16 = 0:1 */ - writel(tmp, mmio + PDC_TBG_MODE); + writel(tmp, host_mmio + PDC_TBG_MODE); - readl(mmio + PDC_TBG_MODE); /* flush */ + readl(host_mmio + PDC_TBG_MODE); /* flush */ msleep(10); /* adjust slew rate control register. */ - tmp = readl(mmio + PDC_SLEW_CTL); + tmp = readl(host_mmio + PDC_SLEW_CTL); tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */ tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */ - writel(tmp, mmio + PDC_SLEW_CTL); + writel(tmp, host_mmio + PDC_SLEW_CTL); } -static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int pdc_ata_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - struct pdc_host_priv *hp; - unsigned long base; - void __iomem *mmio_base; - unsigned int board_idx = (unsigned int) ent->driver_data; - int pci_dev_busy = 0; - int rc; - - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - - rc = pci_enable_device(pdev); + const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; + const struct ata_port_info *ppi[PDC_MAX_PORTS]; + struct ata_host *host; + struct pdc_host_priv *hpriv; + void __iomem *host_mmio; + int n_ports, i, rc; + int is_sataii_tx4; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + /* enable and acquire resources */ + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } - - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - - probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } - - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); - - mmio_base = pci_iomap(pdev, 3, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; + return rc; + host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR]; + + /* determine port configuration and setup host */ + n_ports = 2; + if (pi->flags & PDC_FLAG_4_PORTS) + n_ports = 4; + for (i = 0; i < n_ports; i++) + ppi[i] = pi; + + if (pi->flags & PDC_FLAG_SATA_PATA) { + u8 tmp = readb(host_mmio 
+ PDC_FLASH_CTL + 1); + if (!(tmp & 0x80)) + ppi[n_ports++] = pi + 1; } - base = (unsigned long) mmio_base; - hp = kzalloc(sizeof(*hp), GFP_KERNEL); - if (hp == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) { + dev_err(&pdev->dev, "failed to allocate host\n"); + return -ENOMEM; } + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + spin_lock_init(&hpriv->hard_reset_lock); + host->private_data = hpriv; + host->iomap = pcim_iomap_table(pdev); + + is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); + unsigned int ata_offset = 0x200 + ata_no * 0x80; + unsigned int scr_offset = 0x400 + ata_no * 0x100; - /* Set default hotplug offset */ - hp->hotplug_offset = PDC_SATA_PLUG_CSR; - probe_ent->private_data = hp; - - probe_ent->sht = pdc_port_info[board_idx].sht; - probe_ent->port_flags = pdc_port_info[board_idx].flags; - probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; - probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask; - probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask; - probe_ent->port_ops = pdc_port_info[board_idx].port_ops; - - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - - pdc_ata_setup_port(&probe_ent->port[0], base + 0x200); - pdc_ata_setup_port(&probe_ent->port[1], base + 0x280); - - probe_ent->port[0].scr_addr = base + 0x400; - probe_ent->port[1].scr_addr = base + 0x500; - - /* notice 4-port boards */ - switch (board_idx) { - case board_40518: - /* Override hotplug offset for SATAII150 */ - hp->hotplug_offset = PDC2_SATA_PLUG_CSR; - /* Fall through */ - case board_20319: - probe_ent->n_ports = 4; - - pdc_ata_setup_port(&probe_ent->port[2], base + 0x300); - pdc_ata_setup_port(&probe_ent->port[3], base + 0x380); - - probe_ent->port[2].scr_addr = base + 0x600; - probe_ent->port[3].scr_addr = base + 0x700; - break; - case board_2057x: - /* Override hotplug offset for SATAII150 */ - hp->hotplug_offset = PDC2_SATA_PLUG_CSR; - /* Fall through */ - case board_2037x: - probe_ent->n_ports = 2; - break; - case board_20771: - probe_ent->n_ports = 2; - break; - case board_20619: - probe_ent->n_ports = 4; - - pdc_ata_setup_port(&probe_ent->port[2], base + 0x300); - pdc_ata_setup_port(&probe_ent->port[3], base + 0x380); + pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset); - probe_ent->port[2].scr_addr = base + 0x600; - probe_ent->port[3].scr_addr = base + 0x700; - break; - default: - BUG(); - break; + ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata"); } - pci_set_master(pdev); - /* initialize adapter */ - pdc_host_init(board_idx, probe_ent); - - /* FIXME: Need any other frees than hp? 
*/ - if (!ata_device_add(probe_ent)) - kfree(hp); - - kfree(probe_ent); - - return 0; - -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; -} - - -static int __init pdc_ata_init(void) -{ - return pci_register_driver(&pdc_ata_pci_driver); -} + pdc_host_init(host); + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; -static void __exit pdc_ata_exit(void) -{ - pci_unregister_driver(&pdc_ata_pci_driver); + /* start host, request IRQ and attach */ + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED, + &pdc_ata_sht); } +module_pci_driver(pdc_ata_pci_driver); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_init(pdc_ata_init); -module_exit(pdc_ata_exit); diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h index 6ee5e190262..00d6000e546 100644 --- a/drivers/ata/sata_promise.h +++ b/drivers/ata/sata_promise.h @@ -46,7 +46,7 @@ static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf, unsigned int devno, u8 *buf) { u8 dev_reg; - u32 *buf32 = (u32 *) buf; + __le32 *buf32 = (__le32 *) buf; /* set control bits (byte 0), zero delay seq id (byte 3), * and seq id (byte 2) diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index fa29dfe2a7b..9a6bd4cd29a 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -29,21 +29,21 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/sched.h> #include <linux/device.h> #include <scsi/scsi_host.h> -#include <asm/io.h> #include <linux/libata.h> #define DRV_NAME "sata_qstor" -#define DRV_VERSION "0.06" +#define DRV_VERSION "0.09" enum { + QS_MMIO_BAR = 4, + QS_PORTS = 4, QS_MAX_PRD = LIBATA_MAX_PRD, QS_CPB_ORDER = 6, @@ -103,7 +103,7 @@ enum { QS_DMA_BOUNDARY = ~0UL }; -typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t; +typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t; struct qs_port_priv { u8 *pkt; @@ -111,82 +111,58 @@ struct qs_port_priv { qs_state_t state; }; -static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); -static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs); +static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); -static void qs_port_stop(struct ata_port *ap); -static void qs_phy_reset(struct ata_port *ap); static void qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); -static void qs_bmdma_stop(struct ata_queued_cmd *qc); -static u8 qs_bmdma_status(struct ata_port *ap); -static void qs_irq_clear(struct ata_port *ap); -static void 
qs_eng_timeout(struct ata_port *ap); +static void qs_freeze(struct ata_port *ap); +static void qs_thaw(struct ata_port *ap); +static int qs_prereset(struct ata_link *link, unsigned long deadline); +static void qs_error_handler(struct ata_port *ap); static struct scsi_host_template qs_ata_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, + ATA_BASE_SHT(DRV_NAME), .sg_tablesize = QS_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, - .use_clustering = ENABLE_CLUSTERING, - .proc_name = DRV_NAME, .dma_boundary = QS_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations qs_ata_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, +static struct ata_port_operations qs_ata_ops = { + .inherits = &ata_sff_port_ops, + .check_atapi_dma = qs_check_atapi_dma, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .phy_reset = qs_phy_reset, .qc_prep = qs_qc_prep, .qc_issue = qs_qc_issue, - .data_xfer = ata_mmio_data_xfer, - .eng_timeout = qs_eng_timeout, - .irq_handler = qs_intr, - .irq_clear = qs_irq_clear, + + .freeze = qs_freeze, + .thaw = qs_thaw, + .prereset = qs_prereset, + .softreset = ATA_OP_NULL, + .error_handler = qs_error_handler, + .lost_interrupt = ATA_OP_NULL, + .scr_read = qs_scr_read, .scr_write = qs_scr_write, + .port_start = qs_port_start, - .port_stop = qs_port_stop, .host_stop = qs_host_stop, - .bmdma_stop = qs_bmdma_stop, - .bmdma_status = qs_bmdma_status, }; static const struct ata_port_info qs_port_info[] = { /* board_2068_idx */ { - .sht = &qs_ata_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | - //FIXME ATA_FLAG_SRST | - ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, - .pio_mask = 0x10, /* pio4 */ - .udma_mask = 0x7f, /* udma0-6 */ + .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, + .pio_mask = ATA_PIO4_ONLY, + .udma_mask = ATA_UDMA6, .port_ops = &qs_ata_ops, }, }; static const struct pci_device_id qs_ata_pci_tbl[] = { - { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_2068_idx }, + { PCI_VDEVICE(PDC, 0x2068), board_2068_idx }, { } /* terminate list */ }; @@ -198,74 +174,79 @@ static struct pci_driver qs_ata_pci_driver = { .remove = ata_pci_remove_one, }; -static int qs_check_atapi_dma(struct ata_queued_cmd *qc) +static void __iomem *qs_mmio_base(struct ata_host *host) { - return 1; /* ATAPI DMA not supported */ -} - -static void qs_bmdma_stop(struct ata_queued_cmd *qc) -{ - /* nothing */ + return host->iomap[QS_MMIO_BAR]; } -static u8 qs_bmdma_status(struct ata_port *ap) -{ - return 0; -} - -static void qs_irq_clear(struct ata_port *ap) +static int qs_check_atapi_dma(struct ata_queued_cmd *qc) { - /* nothing */ + return 1; /* ATAPI DMA not supported */ } static inline void qs_enter_reg_mode(struct ata_port *ap) { - u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); + u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); + struct qs_port_priv *pp = ap->private_data; + pp->state = qs_state_mmio; writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); readb(chan + QS_CCT_CTR0); /* flush */ } static inline void qs_reset_channel_logic(struct ata_port *ap) { - u8 __iomem *chan = ap->host->mmio_base + 
(ap->port_no * 0x4000); + u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1); readb(chan + QS_CCT_CTR0); /* flush */ qs_enter_reg_mode(ap); } -static void qs_phy_reset(struct ata_port *ap) +static void qs_freeze(struct ata_port *ap) { - struct qs_port_priv *pp = ap->private_data; + u8 __iomem *mmio_base = qs_mmio_base(ap->host); - pp->state = qs_state_idle; - qs_reset_channel_logic(ap); - sata_phy_reset(ap); + writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ + qs_enter_reg_mode(ap); } -static void qs_eng_timeout(struct ata_port *ap) +static void qs_thaw(struct ata_port *ap) { - struct qs_port_priv *pp = ap->private_data; + u8 __iomem *mmio_base = qs_mmio_base(ap->host); + + qs_enter_reg_mode(ap); + writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */ +} + +static int qs_prereset(struct ata_link *link, unsigned long deadline) +{ + struct ata_port *ap = link->ap; - if (pp->state != qs_state_idle) /* healthy paranoia */ - pp->state = qs_state_mmio; qs_reset_channel_logic(ap); - ata_eng_timeout(ap); + return ata_sff_prereset(link, deadline); } -static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return ~0U; - return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8))); + return -EINVAL; + *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8)); + return 0; } -static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static void qs_error_handler(struct ata_port *ap) +{ + qs_enter_reg_mode(ap); + ata_sff_error_handler(ap); +} + +static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; - writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8))); + return -EINVAL; + writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8)); + return 0; } static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) @@ -273,14 +254,10 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) struct scatterlist *sg; struct ata_port *ap = qc->ap; struct qs_port_priv *pp = ap->private_data; - unsigned int nelem; u8 *prd = pp->pkt + QS_CPB_BYTES; + unsigned int si; - WARN_ON(qc->__sg == NULL); - WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); - - nelem = 0; - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { u64 addr; u32 len; @@ -292,12 +269,11 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) *(__le32 *)prd = cpu_to_le32(len); prd += sizeof(u64); - VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem, + VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si, (unsigned long long)addr, len); - nelem++; } - return nelem; + return si; } static void qs_qc_prep(struct ata_queued_cmd *qc) @@ -311,10 +287,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) VPRINTK("ENTER\n"); qs_enter_reg_mode(qc->ap); - if (qc->tf.protocol != ATA_PROT_DMA) { - ata_qc_prep(qc); + if (qc->tf.protocol != ATA_PROT_DMA) return; - } nelem = qs_fill_sg(qc); @@ -326,7 +300,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) /* host control block (HCB) */ buf[ 0] = QS_HCB_HDR; buf[ 1] = hflags; - *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE); + *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes); *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem); addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES; *(__le64 *)(&buf[16]) = cpu_to_le64(addr); @@ -336,13 +310,13 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) buf[28] = dflags; /* frame information 
structure (FIS) */ - ata_tf_to_fis(&qc->tf, &buf[32], 0); + ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); } static inline void qs_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); + u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); VPRINTK("ENTER, ap %p\n", ap); @@ -358,12 +332,11 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: - pp->state = qs_state_pkt; qs_packet_start(qc); return 0; - case ATA_PROT_ATAPI_DMA: + case ATAPI_PROT_DMA: BUG(); break; @@ -372,14 +345,34 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc) } pp->state = qs_state_mmio; - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); +} + +static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status) +{ + qc->err_mask |= ac_err_mask(status); + + if (!qc->err_mask) { + ata_qc_complete(qc); + } else { + struct ata_port *ap = qc->ap; + struct ata_eh_info *ehi = &ap->link.eh_info; + + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "status 0x%02X", status); + + if (qc->err_mask == AC_ERR_DEV) + ata_port_abort(ap); + else + ata_port_freeze(ap); + } } static inline unsigned int qs_intr_pkt(struct ata_host *host) { unsigned int handled = 0; u8 sFFE; - u8 __iomem *mmio_base = host->mmio_base; + u8 __iomem *mmio_base = qs_mmio_base(host); do { u32 sff0 = readl(mmio_base + QS_HST_SFF); @@ -392,28 +385,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host) u8 sHST = sff1 & 0x3f; /* host status */ unsigned int port_no = (sff1 >> 8) & 0x03; struct ata_port *ap = host->ports[port_no]; + struct qs_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", sff1, sff0, port_no, sHST, sDST); handled = 1; - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; - struct qs_port_priv *pp = ap->private_data; - if (!pp || pp->state != qs_state_pkt) - continue; - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - switch (sHST) { - case 0: /* successful CPB */ - case 3: /* device error */ - pp->state = qs_state_idle; - qs_enter_reg_mode(qc->ap); - qc->err_mask |= ac_err_mask(sDST); - ata_qc_complete(qc); - break; - default: - break; - } + if (!pp || pp->state != qs_state_pkt) + continue; + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + switch (sHST) { + case 0: /* successful CPB */ + case 3: /* device error */ + qs_enter_reg_mode(qc->ap); + qs_do_or_die(qc, sDST); + break; + default: + break; } } } @@ -426,52 +415,52 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { - struct ata_port *ap; - ap = host->ports[port_no]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; - struct qs_port_priv *pp = ap->private_data; - if (!pp || pp->state != qs_state_mmio) - continue; - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - - /* check main status, clearing INTRQ */ - u8 status = ata_check_status(ap); - if ((status & ATA_BUSY)) - continue; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->id, qc->tf.protocol, status); - - /* complete taskfile transaction */ - pp->state = qs_state_idle; - qc->err_mask |= ac_err_mask(status); - ata_qc_complete(qc); - handled = 1; - } + struct ata_port *ap = 
host->ports[port_no]; + struct qs_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (!qc) { + /* + * The qstor hardware generates spurious + * interrupts from time to time when switching + * in and out of packet mode. There's no + * obvious way to know if we're here now due + * to that, so just ack the irq and pretend we + * knew it was ours.. (ugh). This does not + * affect packet mode. + */ + ata_sff_check_status(ap); + handled = 1; + continue; } + + if (!pp || pp->state != qs_state_mmio) + continue; + if (!(qc->tf.flags & ATA_TFLAG_POLLING)) + handled |= ata_sff_port_intr(ap, qc); } return handled; } -static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs) +static irqreturn_t qs_intr(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int handled = 0; + unsigned long flags; VPRINTK("ENTER\n"); - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); handled = qs_intr_pkt(host) | qs_intr_mmio(host); - spin_unlock(&host->lock); + spin_unlock_irqrestore(&host->lock, flags); VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } -static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base) +static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = port->data_addr = base + 0x400; @@ -493,77 +482,45 @@ static int qs_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct qs_port_priv *pp; - void __iomem *mmio_base = ap->host->mmio_base; + void __iomem *mmio_base = qs_mmio_base(ap->host); void __iomem *chan = mmio_base + (ap->port_no * 0x4000); u64 addr; - int rc; - rc = ata_port_start(ap); - if (rc) - return rc; - qs_enter_reg_mode(ap); - pp = kzalloc(sizeof(*pp), GFP_KERNEL); - if (!pp) { - rc = -ENOMEM; - goto err_out; - } - pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma, - GFP_KERNEL); - if (!pp->pkt) { - rc = -ENOMEM; - goto err_out_kfree; - } + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma, + GFP_KERNEL); + if (!pp->pkt) + return -ENOMEM; memset(pp->pkt, 0, QS_PKT_BYTES); ap->private_data = pp; + qs_enter_reg_mode(ap); addr = (u64)pp->pkt_dma; writel((u32) addr, chan + QS_CCF_CPBA); writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4); return 0; - -err_out_kfree: - kfree(pp); -err_out: - ata_port_stop(ap); - return rc; -} - -static void qs_port_stop(struct ata_port *ap) -{ - struct device *dev = ap->host->dev; - struct qs_port_priv *pp = ap->private_data; - - if (pp != NULL) { - ap->private_data = NULL; - if (pp->pkt != NULL) - dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt, - pp->pkt_dma); - kfree(pp); - } - ata_port_stop(ap); } static void qs_host_stop(struct ata_host *host) { - void __iomem *mmio_base = host->mmio_base; - struct pci_dev *pdev = to_pci_dev(host->dev); + void __iomem *mmio_base = qs_mmio_base(host); writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ - - pci_iounmap(pdev, mmio_base); } -static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) +static void qs_host_init(struct ata_host *host, unsigned int chip_id) { - void __iomem *mmio_base = pe->mmio_base; + void __iomem *mmio_base = host->iomap[QS_MMIO_BAR]; unsigned int port_no; writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ /* reset each 
channel in turn */ - for (port_no = 0; port_no < pe->n_ports; ++port_no) { + for (port_no = 0; port_no < host->n_ports; ++port_no) { u8 __iomem *chan = mmio_base + (port_no * 0x4000); writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1); writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); @@ -571,7 +528,7 @@ static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) } writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */ - for (port_no = 0; port_no < pe->n_ports; ++port_no) { + for (port_no = 0; port_no < host->n_ports; ++port_no) { u8 __iomem *chan = mmio_base + (port_no * 0x4000); /* set FIFO depths to same settings as Windows driver */ writew(32, chan + QS_CFC_HUFT); @@ -600,26 +557,25 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT); if (have_64bit_bus && - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); + dev_err(&pdev->dev, + "64-bit DMA enable failed\n"); return rc; } } } else { - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, + dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } @@ -630,101 +586,58 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - void __iomem *mmio_base; unsigned int board_idx = (unsigned int) ent->driver_data; + const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL }; + struct ata_host *host; int rc, port_no; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); - if (rc) - return rc; + /* alloc host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS); + if (!host) + return -ENOMEM; - rc = pci_request_regions(pdev, DRV_NAME); + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); if (rc) - goto err_out; + return rc; - if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) { - rc = -ENODEV; - goto err_out_regions; - } + if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0) + return -ENODEV; - mmio_base = pci_iomap(pdev, 4, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); - rc = qs_set_dma_masks(pdev, mmio_base); + rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]); if (rc) - goto err_out_iounmap; + return rc; - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_iounmap; - } + for (port_no = 0; port_no < host->n_ports; ++port_no) { + struct ata_port *ap = 
host->ports[port_no]; + unsigned int offset = port_no * 0x4000; + void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset; - memset(probe_ent, 0, sizeof(*probe_ent)); - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); - - probe_ent->sht = qs_port_info[board_idx].sht; - probe_ent->port_flags = qs_port_info[board_idx].flags; - probe_ent->pio_mask = qs_port_info[board_idx].pio_mask; - probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask; - probe_ent->udma_mask = qs_port_info[board_idx].udma_mask; - probe_ent->port_ops = qs_port_info[board_idx].port_ops; - - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - probe_ent->n_ports = QS_PORTS; - - for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { - unsigned long chan = (unsigned long)mmio_base + - (port_no * 0x4000); - qs_ata_setup_port(&probe_ent->port[port_no], chan); - } + qs_ata_setup_port(&ap->ioaddr, chan); - pci_set_master(pdev); + ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port"); + } /* initialize adapter */ - qs_host_init(board_idx, probe_ent); - - rc = ata_device_add(probe_ent); - kfree(probe_ent); - if (rc != QS_PORTS) - goto err_out_iounmap; - return 0; - -err_out_iounmap: - pci_iounmap(pdev, mmio_base); -err_out_regions: - pci_release_regions(pdev); -err_out: - pci_disable_device(pdev); - return rc; -} + qs_host_init(host, board_idx); -static int __init qs_ata_init(void) -{ - return pci_register_driver(&qs_ata_pci_driver); + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED, + &qs_ata_sht); } -static void __exit qs_ata_exit(void) -{ - pci_unregister_driver(&qs_ata_pci_driver); -} +module_pci_driver(qs_ata_pci_driver); MODULE_AUTHOR("Mark Lord"); MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_init(qs_ata_init); -module_exit(qs_ata_exit); diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c new file mode 100644 index 00000000000..61eb6d77dac --- /dev/null +++ b/drivers/ata/sata_rcar.c @@ -0,0 +1,1004 @@ +/* + * Renesas R-Car SATA driver + * + * Author: Vladimir Barinov <source@cogentembedded.com> + * Copyright (C) 2013 Cogent Embedded, Inc. + * Copyright (C) 2013 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
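 *
 * A note on register access, implied by sata_rcar_setup_port() below:
 * the SATA-IP task-file registers sit on a 32-bit bus, so each shadow
 * register is spaced four bytes apart (cmd_addr + (ATA_REG_xxx << 2))
 * and is accessed with 32-bit MMIO even though only the low byte or
 * halfword carries data.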
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/err.h> + +#define DRV_NAME "sata_rcar" + +/* SH-Navi2G/ATAPI-ATA compatible task registers */ +#define DATA_REG 0x100 +#define SDEVCON_REG 0x138 + +/* SH-Navi2G/ATAPI module compatible control registers */ +#define ATAPI_CONTROL1_REG 0x180 +#define ATAPI_STATUS_REG 0x184 +#define ATAPI_INT_ENABLE_REG 0x188 +#define ATAPI_DTB_ADR_REG 0x198 +#define ATAPI_DMA_START_ADR_REG 0x19C +#define ATAPI_DMA_TRANS_CNT_REG 0x1A0 +#define ATAPI_CONTROL2_REG 0x1A4 +#define ATAPI_SIG_ST_REG 0x1B0 +#define ATAPI_BYTE_SWAP_REG 0x1BC + +/* ATAPI control 1 register (ATAPI_CONTROL1) bits */ +#define ATAPI_CONTROL1_ISM BIT(16) +#define ATAPI_CONTROL1_DTA32M BIT(11) +#define ATAPI_CONTROL1_RESET BIT(7) +#define ATAPI_CONTROL1_DESE BIT(3) +#define ATAPI_CONTROL1_RW BIT(2) +#define ATAPI_CONTROL1_STOP BIT(1) +#define ATAPI_CONTROL1_START BIT(0) + +/* ATAPI status register (ATAPI_STATUS) bits */ +#define ATAPI_STATUS_SATAINT BIT(11) +#define ATAPI_STATUS_DNEND BIT(6) +#define ATAPI_STATUS_DEVTRM BIT(5) +#define ATAPI_STATUS_DEVINT BIT(4) +#define ATAPI_STATUS_ERR BIT(2) +#define ATAPI_STATUS_NEND BIT(1) +#define ATAPI_STATUS_ACT BIT(0) + +/* Interrupt enable register (ATAPI_INT_ENABLE) bits */ +#define ATAPI_INT_ENABLE_SATAINT BIT(11) +#define ATAPI_INT_ENABLE_DNEND BIT(6) +#define ATAPI_INT_ENABLE_DEVTRM BIT(5) +#define ATAPI_INT_ENABLE_DEVINT BIT(4) +#define ATAPI_INT_ENABLE_ERR BIT(2) +#define ATAPI_INT_ENABLE_NEND BIT(1) +#define ATAPI_INT_ENABLE_ACT BIT(0) + +/* Access control registers for physical layer control register */ +#define SATAPHYADDR_REG 0x200 +#define SATAPHYWDATA_REG 0x204 +#define SATAPHYACCEN_REG 0x208 +#define SATAPHYRESET_REG 0x20C +#define SATAPHYRDATA_REG 0x210 +#define SATAPHYACK_REG 0x214 + +/* Physical layer control address command register (SATAPHYADDR) bits */ +#define SATAPHYADDR_PHYRATEMODE BIT(10) +#define SATAPHYADDR_PHYCMD_READ BIT(9) +#define SATAPHYADDR_PHYCMD_WRITE BIT(8) + +/* Physical layer control enable register (SATAPHYACCEN) bits */ +#define SATAPHYACCEN_PHYLANE BIT(0) + +/* Physical layer control reset register (SATAPHYRESET) bits */ +#define SATAPHYRESET_PHYRST BIT(1) +#define SATAPHYRESET_PHYSRES BIT(0) + +/* Physical layer control acknowledge register (SATAPHYACK) bits */ +#define SATAPHYACK_PHYACK BIT(0) + +/* Serial-ATA HOST control registers */ +#define BISTCONF_REG 0x102C +#define SDATA_REG 0x1100 +#define SSDEVCON_REG 0x1204 + +#define SCRSSTS_REG 0x1400 +#define SCRSERR_REG 0x1404 +#define SCRSCON_REG 0x1408 +#define SCRSACT_REG 0x140C + +#define SATAINTSTAT_REG 0x1508 +#define SATAINTMASK_REG 0x150C + +/* SATA INT status register (SATAINTSTAT) bits */ +#define SATAINTSTAT_SERR BIT(3) +#define SATAINTSTAT_ATA BIT(0) + +/* SATA INT mask register (SATAINTSTAT) bits */ +#define SATAINTMASK_SERRMSK BIT(3) +#define SATAINTMASK_ERRMSK BIT(2) +#define SATAINTMASK_ERRCRTMSK BIT(1) +#define SATAINTMASK_ATAMSK BIT(0) + +#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \ + SATAINTMASK_ATAMSK) + +/* Physical Layer Control Registers */ +#define SATAPCTLR1_REG 0x43 +#define SATAPCTLR2_REG 0x52 +#define SATAPCTLR3_REG 0x5A +#define SATAPCTLR4_REG 0x60 + +/* Descriptor table word 0 bit (when DTA32M = 1) */ +#define SATA_RCAR_DTEND BIT(0) + +#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL + +/* Gen2 Physical Layer Control Registers */ +#define RCAR_GEN2_PHY_CTL1_REG 
0x1704 +#define RCAR_GEN2_PHY_CTL1 0x34180002 +#define RCAR_GEN2_PHY_CTL1_SS 0xC180 /* Spread Spectrum */ + +#define RCAR_GEN2_PHY_CTL2_REG 0x170C +#define RCAR_GEN2_PHY_CTL2 0x00002303 + +#define RCAR_GEN2_PHY_CTL3_REG 0x171C +#define RCAR_GEN2_PHY_CTL3 0x000B0194 + +#define RCAR_GEN2_PHY_CTL4_REG 0x1724 +#define RCAR_GEN2_PHY_CTL4 0x00030994 + +#define RCAR_GEN2_PHY_CTL5_REG 0x1740 +#define RCAR_GEN2_PHY_CTL5 0x03004001 +#define RCAR_GEN2_PHY_CTL5_DC BIT(1) /* DC connection */ +#define RCAR_GEN2_PHY_CTL5_TR BIT(2) /* Termination Resistor */ + +enum sata_rcar_type { + RCAR_GEN1_SATA, + RCAR_GEN2_SATA, +}; + +struct sata_rcar_priv { + void __iomem *base; + struct clk *clk; + enum sata_rcar_type type; +}; + +static void sata_rcar_gen1_phy_preinit(struct sata_rcar_priv *priv) +{ + void __iomem *base = priv->base; + + /* idle state */ + iowrite32(0, base + SATAPHYADDR_REG); + /* reset */ + iowrite32(SATAPHYRESET_PHYRST, base + SATAPHYRESET_REG); + udelay(10); + /* deassert reset */ + iowrite32(0, base + SATAPHYRESET_REG); +} + +static void sata_rcar_gen1_phy_write(struct sata_rcar_priv *priv, u16 reg, + u32 val, int group) +{ + void __iomem *base = priv->base; + int timeout; + + /* deassert reset */ + iowrite32(0, base + SATAPHYRESET_REG); + /* lane 1 */ + iowrite32(SATAPHYACCEN_PHYLANE, base + SATAPHYACCEN_REG); + /* write phy register value */ + iowrite32(val, base + SATAPHYWDATA_REG); + /* set register group */ + if (group) + reg |= SATAPHYADDR_PHYRATEMODE; + /* write command */ + iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, base + SATAPHYADDR_REG); + /* wait for ack */ + for (timeout = 0; timeout < 100; timeout++) { + val = ioread32(base + SATAPHYACK_REG); + if (val & SATAPHYACK_PHYACK) + break; + } + if (timeout >= 100) + pr_err("%s timeout\n", __func__); + /* idle state */ + iowrite32(0, base + SATAPHYADDR_REG); +} + +static void sata_rcar_gen1_phy_init(struct sata_rcar_priv *priv) +{ + sata_rcar_gen1_phy_preinit(priv); + sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0); + sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1); + sata_rcar_gen1_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0); + sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0); + sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1); + sata_rcar_gen1_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0); +} + +static void sata_rcar_gen2_phy_init(struct sata_rcar_priv *priv) +{ + void __iomem *base = priv->base; + + iowrite32(RCAR_GEN2_PHY_CTL1, base + RCAR_GEN2_PHY_CTL1_REG); + iowrite32(RCAR_GEN2_PHY_CTL2, base + RCAR_GEN2_PHY_CTL2_REG); + iowrite32(RCAR_GEN2_PHY_CTL3, base + RCAR_GEN2_PHY_CTL3_REG); + iowrite32(RCAR_GEN2_PHY_CTL4, base + RCAR_GEN2_PHY_CTL4_REG); + iowrite32(RCAR_GEN2_PHY_CTL5 | RCAR_GEN2_PHY_CTL5_DC | + RCAR_GEN2_PHY_CTL5_TR, base + RCAR_GEN2_PHY_CTL5_REG); +} + +static void sata_rcar_freeze(struct ata_port *ap) +{ + struct sata_rcar_priv *priv = ap->host->private_data; + + /* mask */ + iowrite32(0x7ff, priv->base + SATAINTMASK_REG); + + ata_sff_freeze(ap); +} + +static void sata_rcar_thaw(struct ata_port *ap) +{ + struct sata_rcar_priv *priv = ap->host->private_data; + void __iomem *base = priv->base; + + /* ack */ + iowrite32(~(u32)SATA_RCAR_INT_MASK, base + SATAINTSTAT_REG); + + ata_sff_thaw(ap); + + /* unmask */ + iowrite32(0x7ff & ~SATA_RCAR_INT_MASK, base + SATAINTMASK_REG); +} + +static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count) +{ + u16 *ptr = buffer; + + while (count--) { + u16 data = ioread32(reg); + + *ptr++ = data; + } +} + +static 
void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count) +{ + const u16 *ptr = buffer; + + while (count--) + iowrite32(*ptr++, reg); +} + +static u8 sata_rcar_check_status(struct ata_port *ap) +{ + return ioread32(ap->ioaddr.status_addr); +} + +static u8 sata_rcar_check_altstatus(struct ata_port *ap) +{ + return ioread32(ap->ioaddr.altstatus_addr); +} + +static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl) +{ + iowrite32(ctl, ap->ioaddr.ctl_addr); +} + +static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device) +{ + iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr); + ata_sff_pause(ap); /* needed; also flushes, for mmio */ +} + +static unsigned int sata_rcar_ata_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + sata_rcar_dev_select(ap, device); + + iowrite32(0x55, ioaddr->nsect_addr); + iowrite32(0xaa, ioaddr->lbal_addr); + + iowrite32(0xaa, ioaddr->nsect_addr); + iowrite32(0x55, ioaddr->lbal_addr); + + iowrite32(0x55, ioaddr->nsect_addr); + iowrite32(0xaa, ioaddr->lbal_addr); + + nsect = ioread32(ioaddr->nsect_addr); + lbal = ioread32(ioaddr->lbal_addr); + + if (nsect == 0x55 && lbal == 0xaa) + return 1; /* found a device */ + + return 0; /* nothing found */ +} + +static int sata_rcar_wait_after_reset(struct ata_link *link, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + + ata_msleep(ap, ATA_WAIT_AFTER_RESET); + + return ata_sff_wait_ready(link, deadline); +} + +static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); + + /* software reset. causes dev0 to be selected */ + iowrite32(ap->ctl, ioaddr->ctl_addr); + udelay(20); + iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); + iowrite32(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + + /* wait the port to become ready */ + return sata_rcar_wait_after_reset(&ap->link, deadline); +} + +static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + unsigned int devmask = 0; + int rc; + u8 err; + + /* determine if device 0 is present */ + if (sata_rcar_ata_devchk(ap, 0)) + devmask |= 1 << 0; + + /* issue bus reset */ + DPRINTK("about to softreset, devmask=%x\n", devmask); + rc = sata_rcar_bus_softreset(ap, deadline); + /* if link is occupied, -ENODEV too is an error */ + if (rc && (rc != -ENODEV || sata_scr_valid(link))) { + ata_link_err(link, "SRST failed (errno=%d)\n", rc); + return rc; + } + + /* determine by signature whether we have ATA or ATAPI devices */ + classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err); + + DPRINTK("classes[0]=%u\n", classes[0]); + return 0; +} + +static void sata_rcar_tf_load(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + iowrite32(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + iowrite32(tf->hob_feature, ioaddr->feature_addr); + iowrite32(tf->hob_nsect, ioaddr->nsect_addr); + iowrite32(tf->hob_lbal, ioaddr->lbal_addr); + iowrite32(tf->hob_lbam, ioaddr->lbam_addr); + iowrite32(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + 
tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + iowrite32(tf->feature, ioaddr->feature_addr); + iowrite32(tf->nsect, ioaddr->nsect_addr); + iowrite32(tf->lbal, ioaddr->lbal_addr); + iowrite32(tf->lbam, ioaddr->lbam_addr); + iowrite32(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + iowrite32(tf->device, ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->command = sata_rcar_check_status(ap); + tf->feature = ioread32(ioaddr->error_addr); + tf->nsect = ioread32(ioaddr->nsect_addr); + tf->lbal = ioread32(ioaddr->lbal_addr); + tf->lbam = ioread32(ioaddr->lbam_addr); + tf->lbah = ioread32(ioaddr->lbah_addr); + tf->device = ioread32(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = ioread32(ioaddr->error_addr); + tf->hob_nsect = ioread32(ioaddr->nsect_addr); + tf->hob_lbal = ioread32(ioaddr->lbal_addr); + tf->hob_lbam = ioread32(ioaddr->lbam_addr); + tf->hob_lbah = ioread32(ioaddr->lbah_addr); + iowrite32(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + } +} + +static void sata_rcar_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); + + iowrite32(tf->command, ap->ioaddr.command_addr); + ata_sff_pause(ap); +} + +static unsigned int sata_rcar_data_xfer(struct ata_device *dev, + unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 1; + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + sata_rcar_ioread16_rep(data_addr, buf, words); + else + sata_rcar_iowrite16_rep(data_addr, buf, words); + + /* Transfer trailing byte, if any. */ + if (unlikely(buflen & 0x01)) { + unsigned char pad[2] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - 1; + + /* + * Use io*16_rep() accessors here as well to avoid pointlessly + * swapping bytes to and from on the big endian machines... 
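 *
 * (On this IP each 16-bit data word travels in the low half of a
 * 32-bit access, which is why the _rep helpers above are built on
 * ioread32()/iowrite32() instead of readw()/writew(). The pad[2]
 * buffer below lets the same helpers move the odd final byte as a
 * single padded 16-bit word.)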
+ */ + if (rw == READ) { + sata_rcar_ioread16_rep(data_addr, pad, 1); + *buf = pad[0]; + } else { + pad[0] = *buf; + sata_rcar_iowrite16_rep(data_addr, pad, 1); + } + words++; + } + + return words << 1; +} + +static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc) +{ + int count; + struct ata_port *ap; + + /* We only need to flush incoming data when a command was running */ + if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) + return; + + ap = qc->ap; + /* Drain up to 64K of data before we give up this recovery method */ + for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) && + count < 65536; count += 2) + ioread32(ap->ioaddr.data_addr); + + /* Can become DEBUG later */ + if (count) + ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); +} + +static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg, + u32 *val) +{ + if (sc_reg > SCR_ACTIVE) + return -EINVAL; + + *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2)); + return 0; +} + +static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_ACTIVE) + return -EINVAL; + + iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2)); + return 0; +} + +static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si; + + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, sg_len; + + /* + * Note: h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32)sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + prd[si].addr = cpu_to_le32(addr); + prd[si].flags_len = cpu_to_le32(sg_len); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len); + } + + /* end-of-table flag */ + prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); +} + +static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + sata_rcar_bmdma_fill_sg(qc); +} + +static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE; + struct sata_rcar_priv *priv = ap->host->private_data; + void __iomem *base = priv->base; + u32 dmactl; + + /* load PRD table addr. 
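 * The table was built by sata_rcar_bmdma_fill_sg() as an array of
 * standard ata_bmdma_prd entries (two little-endian 32-bit words:
 * bus address, then length) -- but note the end-of-table mark: with
 * DTA32M = 1 this IP takes SATA_RCAR_DTEND in bit 0 of the address
 * word of the last entry, not the ATA_PRD_EOT bit in flags_len that
 * plain BMDMA uses.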
*/ + mb(); /* make sure PRD table writes are visible to controller */ + iowrite32(ap->bmdma_prd_dma, base + ATAPI_DTB_ADR_REG); + + /* specify data direction, triple-check start bit is clear */ + dmactl = ioread32(base + ATAPI_CONTROL1_REG); + dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP); + if (dmactl & ATAPI_CONTROL1_START) { + dmactl &= ~ATAPI_CONTROL1_START; + dmactl |= ATAPI_CONTROL1_STOP; + } + if (!rw) + dmactl |= ATAPI_CONTROL1_RW; + iowrite32(dmactl, base + ATAPI_CONTROL1_REG); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct sata_rcar_priv *priv = ap->host->private_data; + void __iomem *base = priv->base; + u32 dmactl; + + /* start host DMA transaction */ + dmactl = ioread32(base + ATAPI_CONTROL1_REG); + dmactl &= ~ATAPI_CONTROL1_STOP; + dmactl |= ATAPI_CONTROL1_START; + iowrite32(dmactl, base + ATAPI_CONTROL1_REG); +} + +static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct sata_rcar_priv *priv = ap->host->private_data; + void __iomem *base = priv->base; + u32 dmactl; + + /* force termination of DMA transfer if active */ + dmactl = ioread32(base + ATAPI_CONTROL1_REG); + if (dmactl & ATAPI_CONTROL1_START) { + dmactl &= ~ATAPI_CONTROL1_START; + dmactl |= ATAPI_CONTROL1_STOP; + iowrite32(dmactl, base + ATAPI_CONTROL1_REG); + } + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); +} + +static u8 sata_rcar_bmdma_status(struct ata_port *ap) +{ + struct sata_rcar_priv *priv = ap->host->private_data; + u8 host_stat = 0; + u32 status; + + status = ioread32(priv->base + ATAPI_STATUS_REG); + if (status & ATAPI_STATUS_DEVINT) + host_stat |= ATA_DMA_INTR; + if (status & ATAPI_STATUS_ACT) + host_stat |= ATA_DMA_ACTIVE; + + return host_stat; +} + +static struct scsi_host_template sata_rcar_sht = { + ATA_BASE_SHT(DRV_NAME), + /* + * This controller allows transfer chunks up to 512MB which cross 64KB + * boundaries, therefore the DMA limits are more relaxed than standard + * ATA SFF. 
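 *
 * dma_boundary is a mask: the block layer never builds a segment
 * that crosses a (mask + 1)-aligned address boundary. The R-Car
 * value 0x1FFFFFFE therefore admits segments of roughly 512MB,
 * against the 0xFFFF (64KB) mask used for standard SFF BMDMA.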
+ */ + .sg_tablesize = ATA_MAX_PRD, + .dma_boundary = SATA_RCAR_DMA_BOUNDARY, +}; + +static struct ata_port_operations sata_rcar_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .freeze = sata_rcar_freeze, + .thaw = sata_rcar_thaw, + .softreset = sata_rcar_softreset, + + .scr_read = sata_rcar_scr_read, + .scr_write = sata_rcar_scr_write, + + .sff_dev_select = sata_rcar_dev_select, + .sff_set_devctl = sata_rcar_set_devctl, + .sff_check_status = sata_rcar_check_status, + .sff_check_altstatus = sata_rcar_check_altstatus, + .sff_tf_load = sata_rcar_tf_load, + .sff_tf_read = sata_rcar_tf_read, + .sff_exec_command = sata_rcar_exec_command, + .sff_data_xfer = sata_rcar_data_xfer, + .sff_drain_fifo = sata_rcar_drain_fifo, + + .qc_prep = sata_rcar_qc_prep, + + .bmdma_setup = sata_rcar_bmdma_setup, + .bmdma_start = sata_rcar_bmdma_start, + .bmdma_stop = sata_rcar_bmdma_stop, + .bmdma_status = sata_rcar_bmdma_status, +}; + +static void sata_rcar_serr_interrupt(struct ata_port *ap) +{ + struct sata_rcar_priv *priv = ap->host->private_data; + struct ata_eh_info *ehi = &ap->link.eh_info; + int freeze = 0; + u32 serror; + + serror = ioread32(priv->base + SCRSERR_REG); + if (!serror) + return; + + DPRINTK("SError @host_intr: 0x%x\n", serror); + + /* first, analyze and record host port events */ + ata_ehi_clear_desc(ehi); + + if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) { + /* Setup a soft-reset EH action */ + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "%s", "hotplug"); + + freeze = serror & SERR_COMM_WAKE ? 0 : 1; + } + + /* freeze or abort */ + if (freeze) + ata_port_freeze(ap); + else + ata_port_abort(ap); +} + +static void sata_rcar_ata_interrupt(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + int handled = 0; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc) + handled |= ata_bmdma_port_intr(ap, qc); + + /* be sure to clear ATA interrupt */ + if (!handled) + sata_rcar_check_status(ap); +} + +static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + unsigned int handled = 0; + struct ata_port *ap; + u32 sataintstat; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + sataintstat = ioread32(base + SATAINTSTAT_REG); + sataintstat &= SATA_RCAR_INT_MASK; + if (!sataintstat) + goto done; + /* ack */ + iowrite32(~sataintstat & 0x7ff, base + SATAINTSTAT_REG); + + ap = host->ports[0]; + + if (sataintstat & SATAINTSTAT_ATA) + sata_rcar_ata_interrupt(ap); + + if (sataintstat & SATAINTSTAT_SERR) + sata_rcar_serr_interrupt(ap); + + handled = 1; +done: + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_RETVAL(handled); +} + +static void sata_rcar_setup_port(struct ata_host *host) +{ + struct ata_port *ap = host->ports[0]; + struct ata_ioports *ioaddr = &ap->ioaddr; + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + + ap->ops = &sata_rcar_port_ops; + ap->pio_mask = ATA_PIO4; + ap->udma_mask = ATA_UDMA6; + ap->flags |= ATA_FLAG_SATA; + + ioaddr->cmd_addr = base + SDATA_REG; + ioaddr->ctl_addr = base + SSDEVCON_REG; + ioaddr->scr_addr = base + SCRSSTS_REG; + ioaddr->altstatus_addr = ioaddr->ctl_addr; + + ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2); + ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2); + ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2); + ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2); + ioaddr->lbal_addr = 
ioaddr->cmd_addr + (ATA_REG_LBAL << 2); + ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2); + ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2); + ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2); + ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2); + ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); +} + +static void sata_rcar_init_controller(struct ata_host *host) +{ + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + u32 val; + + /* reset and setup phy */ + switch (priv->type) { + case RCAR_GEN1_SATA: + sata_rcar_gen1_phy_init(priv); + break; + case RCAR_GEN2_SATA: + sata_rcar_gen2_phy_init(priv); + break; + default: + dev_warn(host->dev, "SATA phy is not initialized\n"); + break; + } + + /* SATA-IP reset state */ + val = ioread32(base + ATAPI_CONTROL1_REG); + val |= ATAPI_CONTROL1_RESET; + iowrite32(val, base + ATAPI_CONTROL1_REG); + + /* ISM mode, PRD mode, DTEND flag at bit 0 */ + val = ioread32(base + ATAPI_CONTROL1_REG); + val |= ATAPI_CONTROL1_ISM; + val |= ATAPI_CONTROL1_DESE; + val |= ATAPI_CONTROL1_DTA32M; + iowrite32(val, base + ATAPI_CONTROL1_REG); + + /* Release the SATA-IP from the reset state */ + val = ioread32(base + ATAPI_CONTROL1_REG); + val &= ~ATAPI_CONTROL1_RESET; + iowrite32(val, base + ATAPI_CONTROL1_REG); + + /* ack and mask */ + iowrite32(0, base + SATAINTSTAT_REG); + iowrite32(0x7ff, base + SATAINTMASK_REG); + /* enable interrupts */ + iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); +} + +static struct of_device_id sata_rcar_match[] = { + { + /* Deprecated by "renesas,sata-r8a7779" */ + .compatible = "renesas,rcar-sata", + .data = (void *)RCAR_GEN1_SATA, + }, + { + .compatible = "renesas,sata-r8a7779", + .data = (void *)RCAR_GEN1_SATA, + }, + { + .compatible = "renesas,sata-r8a7790", + .data = (void *)RCAR_GEN2_SATA + }, + { + .compatible = "renesas,sata-r8a7791", + .data = (void *)RCAR_GEN2_SATA + }, + { }, +}; +MODULE_DEVICE_TABLE(of, sata_rcar_match); + +static const struct platform_device_id sata_rcar_id_table[] = { + { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ + { "sata-r8a7779", RCAR_GEN1_SATA }, + { "sata-r8a7790", RCAR_GEN2_SATA }, + { "sata-r8a7791", RCAR_GEN2_SATA }, + { }, +}; +MODULE_DEVICE_TABLE(platform, sata_rcar_id_table); + +static int sata_rcar_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + struct ata_host *host; + struct sata_rcar_priv *priv; + struct resource *mem; + int irq; + int ret = 0; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + of_id = of_match_device(sata_rcar_match, &pdev->dev); + if (of_id) + priv->type = (enum sata_rcar_type)of_id->data; + else + priv->type = platform_get_device_id(pdev)->driver_data; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "failed to get access to sata clock\n"); + return PTR_ERR(priv->clk); + } + clk_prepare_enable(priv->clk); + + host = ata_host_alloc(&pdev->dev, 1); + if (!host) { + dev_err(&pdev->dev, "ata_host_alloc failed\n"); + ret = -ENOMEM; + goto cleanup; + } + + host->private_data = priv; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto cleanup; + } + + /* setup port */ + sata_rcar_setup_port(host); + + /* initialize 
host controller */ + sata_rcar_init_controller(host); + + ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0, + &sata_rcar_sht); + if (!ret) + return 0; + +cleanup: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static int sata_rcar_remove(struct platform_device *pdev) +{ + struct ata_host *host = platform_get_drvdata(pdev); + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + + ata_host_detach(host); + + /* disable interrupts */ + iowrite32(0, base + ATAPI_INT_ENABLE_REG); + /* ack and mask */ + iowrite32(0, base + SATAINTSTAT_REG); + iowrite32(0x7ff, base + SATAINTMASK_REG); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sata_rcar_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + int ret; + + ret = ata_host_suspend(host, PMSG_SUSPEND); + if (!ret) { + /* disable interrupts */ + iowrite32(0, base + ATAPI_INT_ENABLE_REG); + /* mask */ + iowrite32(0x7ff, base + SATAINTMASK_REG); + + clk_disable_unprepare(priv->clk); + } + + return ret; +} + +static int sata_rcar_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct sata_rcar_priv *priv = host->private_data; + void __iomem *base = priv->base; + + clk_prepare_enable(priv->clk); + + /* ack and mask */ + iowrite32(0, base + SATAINTSTAT_REG); + iowrite32(0x7ff, base + SATAINTMASK_REG); + /* enable interrupts */ + iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); + + ata_host_resume(host); + + return 0; +} + +static const struct dev_pm_ops sata_rcar_pm_ops = { + .suspend = sata_rcar_suspend, + .resume = sata_rcar_resume, +}; +#endif + +static struct platform_driver sata_rcar_driver = { + .probe = sata_rcar_probe, + .remove = sata_rcar_remove, + .id_table = sata_rcar_id_table, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sata_rcar_match, +#ifdef CONFIG_PM_SLEEP + .pm = &sata_rcar_pm_ops, +#endif + }, +}; + +module_platform_driver(sata_rcar_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vladimir Barinov"); +MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver"); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index c63dbabc0cd..40b76b2d18c 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -1,7 +1,7 @@ /* * sata_sil.c - Silicon Image SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. 
* @@ -37,18 +37,22 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include <linux/dmi.h> #define DRV_NAME "sata_sil" -#define DRV_VERSION "2.0" +#define DRV_VERSION "2.4" + +#define SIL_DMA_BOUNDARY 0x7fffffffUL enum { + SIL_MMIO_BAR = 5, + /* * host flags */ @@ -56,8 +60,7 @@ enum { SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), SIL_FLAG_MOD15WRITE = (1 << 30), - SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME, + SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA, /* * Controller IDs @@ -108,35 +111,38 @@ enum { SIL_QUIRK_UDMA5MAX = (1 << 1), }; -static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -#ifdef CONFIG_PM +static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +#ifdef CONFIG_PM_SLEEP static int sil_pci_device_resume(struct pci_dev *pdev); #endif -static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); -static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); -static void sil_post_set_mode (struct ata_port *ap); -static irqreturn_t sil_interrupt(int irq, void *dev_instance, - struct pt_regs *regs); +static void sil_dev_config(struct ata_device *dev); +static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); +static void sil_qc_prep(struct ata_queued_cmd *qc); +static void sil_bmdma_setup(struct ata_queued_cmd *qc); +static void sil_bmdma_start(struct ata_queued_cmd *qc); +static void sil_bmdma_stop(struct ata_queued_cmd *qc); static void sil_freeze(struct ata_port *ap); static void sil_thaw(struct ata_port *ap); static const struct pci_device_id sil_pci_tbl[] = { - { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, - { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, - { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, - { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, - { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, - { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, - { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, + { PCI_VDEVICE(CMD, 0x3112), sil_3112 }, + { PCI_VDEVICE(CMD, 0x0240), sil_3112 }, + { PCI_VDEVICE(CMD, 0x3512), sil_3512 }, + { PCI_VDEVICE(CMD, 0x3114), sil_3114 }, + { PCI_VDEVICE(ATI, 0x436e), sil_3112 }, + { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq }, + { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq }, + { } /* terminate list */ }; /* TODO firmware versions should be added - eric */ static const struct sil_drivelist { - const char * product; + const char *product; unsigned int quirk; } sil_blacklist [] = { { "ST320012AS", SIL_QUIRK_MOD15WRITE }, @@ -150,6 +156,7 @@ static const struct sil_drivelist { { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE }, { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, { } }; @@ -159,97 +166,67 @@ static struct pci_driver sil_pci_driver = { .id_table = sil_pci_tbl, .probe = sil_init_one, .remove = ata_pci_remove_one, -#ifdef CONFIG_PM 
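/*
 * CONFIG_PM is also set for runtime-PM-only configurations, where
 * system sleep callbacks would be compiled in but never used;
 * CONFIG_PM_SLEEP guards exactly the suspend/resume path. sata_sil
 * implements only the sleep hooks, hence the narrower guard adopted
 * here and on the pci_driver table below.
 */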
+#ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = sil_pci_device_resume, #endif }; static struct scsi_host_template sil_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, - .suspend = ata_scsi_device_suspend, - .resume = ata_scsi_device_resume, + ATA_BASE_SHT(DRV_NAME), + /** These controllers support Large Block Transfer which allows + transfer chunks up to 2GB and which cross 64KB boundaries, + therefore the DMA limits are more relaxed than standard ATA SFF. */ + .dma_boundary = SIL_DMA_BOUNDARY, + .sg_tablesize = ATA_MAX_PRD }; -static const struct ata_port_operations sil_ops = { - .port_disable = ata_port_disable, +static struct ata_port_operations sil_ops = { + .inherits = &ata_bmdma32_port_ops, .dev_config = sil_dev_config, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .post_set_mode = sil_post_set_mode, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, + .set_mode = sil_set_mode, + .bmdma_setup = sil_bmdma_setup, + .bmdma_start = sil_bmdma_start, + .bmdma_stop = sil_bmdma_stop, + .qc_prep = sil_qc_prep, .freeze = sil_freeze, .thaw = sil_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .irq_handler = sil_interrupt, - .irq_clear = ata_bmdma_irq_clear, .scr_read = sil_scr_read, .scr_write = sil_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, }; static const struct ata_port_info sil_port_info[] = { /* sil_3112 */ { - .sht = &sil_sht, .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil_ops, }, /* sil_3112_no_sata_irq */ { - .sht = &sil_sht, .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | SIL_FLAG_NO_SATA_IRQ, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil_ops, }, /* sil_3512 */ { - .sht = &sil_sht, .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil_ops, }, /* sil_3114 */ { - .sht = &sil_sht, .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil_ops, }, }; @@ -268,8 +245,9 @@ static const struct { unsigned long sfis_cfg; /* SATA FIS reception config register */ } 
sil_port[] = { /* port 0 ... */ - { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c }, - { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, + /* tf ctl bmdma bmdma2 fifo scr sien mode sfis */ + { 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c }, + { 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c }, { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc }, /* ... port 3 */ @@ -281,11 +259,88 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sil_pci_tbl); MODULE_VERSION(DRV_VERSION); -static int slow_down = 0; +static int slow_down; module_param(slow_down, int, 0444); MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); +static void sil_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; + void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2; + + /* clear start/stop bit - can safely always write 0 */ + iowrite8(0, bmdma2); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); +} + +static void sil_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *bmdma = ap->ioaddr.bmdma_addr; + + /* load PRD table addr. */ + iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +static void sil_bmdma_start(struct ata_queued_cmd *qc) +{ + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + struct ata_port *ap = qc->ap; + void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; + void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2; + u8 dmactl = ATA_DMA_START; + + /* set transfer direction, start host DMA transaction + Note: For Large Block Transfer to work, the DMA must be started + using the bmdma2 register. */ + if (!rw) + dmactl |= ATA_DMA_WR; + iowrite8(dmactl, bmdma2); +} + +/* The way God intended PCI IDE scatter/gather lists to look and behave... */ +static void sil_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg; + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd, *last_prd = NULL; + unsigned int si; + + prd = &ap->bmdma_prd[0]; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + /* Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. 
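+ * (This is safe in practice: sil_init_one() sets a 32-bit DMA mask, + * ATA_DMA_MASK, on the device, so the DMA API should never hand this + * driver a bus address above 4GB.)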
+ */ + u32 addr = (u32) sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); + + prd->addr = cpu_to_le32(addr); + prd->flags_len = cpu_to_le32(sg_len); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len); + + last_prd = prd; + prd++; + } + + if (likely(last_prd)) + last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +static void sil_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + sil_fill_sg(qc); +} + static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) { u8 cache_line = 0; @@ -293,22 +348,35 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) return cache_line; } -static void sil_post_set_mode (struct ata_port *ap) +/** + * sil_set_mode - wrap set_mode functions + * @link: link to set up + * @r_failed: returned device when we fail + * + * Wrap the libata method for device setup as after the setup we need + * to inspect the results and do some configuration work + */ + +static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed) { - struct ata_host *host = ap->host; + struct ata_port *ap = link->ap; + void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; + void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; struct ata_device *dev; - void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode; - u32 tmp, dev_mode[2]; - unsigned int i; + u32 tmp, dev_mode[2] = { }; + int rc; + + rc = ata_do_set_mode(link, r_failed); + if (rc) + return rc; - for (i = 0; i < 2; i++) { - dev = &ap->device[i]; + ata_for_each_dev(dev, link, ALL) { if (!ata_dev_enabled(dev)) - dev_mode[i] = 0; /* PIO0/1/2 */ + dev_mode[dev->devno] = 0; /* PIO0/1/2 */ else if (dev->flags & ATA_DFLAG_PIO) - dev_mode[i] = 1; /* PIO3/4 */ + dev_mode[dev->devno] = 1; /* PIO3/4 */ else - dev_mode[i] = 3; /* UDMA */ + dev_mode[dev->devno] = 3; /* UDMA */ /* value 2 indicates MDMA */ } @@ -318,11 +386,13 @@ static void sil_post_set_mode (struct ata_port *ap) tmp |= (dev_mode[1] << 4); writel(tmp, addr); readl(addr); /* flush */ + return 0; } -static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg) +static inline void __iomem *sil_scr_addr(struct ata_port *ap, + unsigned int sc_reg) { - unsigned long offset = ap->ioaddr.scr_addr; + void __iomem *offset = ap->ioaddr.scr_addr; switch (sc_reg) { case SCR_STATUS: @@ -336,54 +406,64 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re break; } - return 0; + return NULL; } -static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { - void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); - if (mmio) - return readl(mmio); - return 0xffffffffU; + void __iomem *mmio = sil_scr_addr(link->ap, sc_reg); + + if (mmio) { + *val = readl(mmio); + return 0; + } + return -EINVAL; } -static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { - void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); - if (mmio) + void __iomem *mmio = sil_scr_addr(link->ap, sc_reg); + + if (mmio) { writel(val, mmio); + return 0; + } + return -EINVAL; } static void sil_host_intr(struct ata_port *ap, u32 bmdma2) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); + struct ata_eh_info *ehi = &ap->link.eh_info; + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); u8 status; if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { - 
u32 serror; + u32 serror = 0xffffffff; /* SIEN doesn't mask SATA IRQs on some 3112s. Those * controllers continue to assert IRQ as long as * SError bits are pending. Clear SError immediately. */ - serror = sil_scr_read(ap, SCR_ERROR); - sil_scr_write(ap, SCR_ERROR, serror); + sil_scr_read(&ap->link, SCR_ERROR, &serror); + sil_scr_write(&ap->link, SCR_ERROR, serror); - /* Trigger hotplug and accumulate SError only if the - * port isn't already frozen. Otherwise, PHY events - * during hardreset makes controllers with broken SIEN - * repeat probing needlessly. + /* Sometimes spurious interrupts occur, double check + * it's PHYRDY CHG. */ - if (!(ap->pflags & ATA_PFLAG_FROZEN)) { - ata_ehi_hotplugged(&ap->eh_info); - ap->eh_info.serror |= serror; + if (serror & SERR_PHYRDY_CHG) { + ap->link.eh_info.serror |= serror; + goto freeze; } - goto freeze; + if (!(bmdma2 & SIL_DMA_COMPLETE)) + return; } - if (unlikely(!qc || qc->tf.ctl & ATA_NIEN)) - goto freeze; + if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { + /* this sometimes happens, just clear IRQ */ + ap->ops->sff_check_status(ap); + return; + } /* Check whether we are expecting interrupt in this state */ switch (ap->hsm_task_state) { @@ -393,15 +473,14 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) */ /* Check the ATA_DFLAG_CDB_INTR flag is enough here. - * The flag was turned on only for atapi devices. - * No need to check is_atapi_taskfile(&qc->tf) again. + * The flag was turned on only for atapi devices. No + * need to check ata_is_atapi(qc->tf.protocol) again. */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) goto err_hsm; break; case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATA_PROT_ATAPI_DMA) { + if (ata_is_dma(qc->tf.protocol)) { /* clear DMA-Start bit */ ap->ops->bmdma_stop(qc); @@ -418,7 +497,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) } /* check main status, clearing INTRQ */ - status = ata_chk_status(ap); + status = ap->ops->sff_check_status(ap); if (unlikely(status & ATA_BUSY)) goto err_hsm; @@ -426,7 +505,10 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) ata_bmdma_irq_clear(ap); /* kick HSM in the ass */ - ata_hsm_move(ap, qc, status, 0); + ata_sff_hsm_move(ap, qc, status, 0); + + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2); return; @@ -436,11 +518,10 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) ata_port_freeze(ap); } -static irqreturn_t sil_interrupt(int irq, void *dev_instance, - struct pt_regs *regs) +static irqreturn_t sil_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; - void __iomem *mmio_base = host->mmio_base; + void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR]; int handled = 0; int i; @@ -450,9 +531,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance, struct ata_port *ap = host->ports[i]; u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); - if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) - continue; - /* turn off SATA_IRQ if not supported */ if (ap->flags & SIL_FLAG_NO_SATA_IRQ) bmdma2 &= ~SIL_DMA_SATA_IRQ; @@ -472,7 +550,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance, static void sil_freeze(struct ata_port *ap) { - void __iomem *mmio_base = ap->host->mmio_base; + void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; u32 tmp; /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */ @@ -483,15 +561,28 @@ static void sil_freeze(struct ata_port *ap) tmp |= 
SIL_MASK_IDE0_INT << ap->port_no; writel(tmp, mmio_base + SIL_SYSCFG); readl(mmio_base + SIL_SYSCFG); /* flush */ + + /* Ensure DMA_ENABLE is off. + * + * This is because the controller will not give us access to the + * taskfile registers while a DMA is in progress + */ + iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE, + ap->ioaddr.bmdma_addr); + + /* According to ata_bmdma_stop, an HDMA transition requires + * one PIO cycle. But we can't read a taskfile register. + */ + ioread8(ap->ioaddr.bmdma_addr); } static void sil_thaw(struct ata_port *ap) { - void __iomem *mmio_base = ap->host->mmio_base; + void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR]; u32 tmp; /* clear IRQ */ - ata_chk_status(ap); + ap->ops->sff_check_status(ap); ata_bmdma_irq_clear(ap); /* turn on SATA IRQ if supported */ @@ -506,7 +597,6 @@ /** * sil_dev_config - Apply device/host-specific errata fixups - * @ap: Port containing device to be examined * @dev: Device to be examined * * After the IDENTIFY [PACKET] DEVICE step is complete, and a @@ -533,12 +623,14 @@ static void sil_thaw(struct ata_port *ap) * appreciated. * - But then again UDMA5 is hardly anything to complain about */ -static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) +static void sil_dev_config(struct ata_device *dev) { + struct ata_port *ap = dev->link->ap; + int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO; unsigned int n, quirks = 0; - unsigned char model_num[41]; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; - ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); for (n = 0; sil_blacklist[n].product; n++) if (!strcmp(sil_blacklist[n].product, model_num)) { @@ -550,25 +642,27 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) if (slow_down || ((ap->flags & SIL_FLAG_MOD15WRITE) && (quirks & SIL_QUIRK_MOD15WRITE))) { - ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix " - "(mod15write workaround)\n"); + if (print_info) + ata_dev_info(dev, + "applying Seagate errata fix (mod15write workaround)\n"); dev->max_sectors = 15; return; } /* limit to udma5 */ if (quirks & SIL_QUIRK_UDMA5MAX) { - ata_dev_printk(dev, KERN_INFO, - "applying Maxtor errata fix %s\n", model_num); + if (print_info) + ata_dev_info(dev, "applying Maxtor errata fix %s\n", + model_num); dev->udma_mask &= ATA_UDMA5; return; } } -static void sil_init_controller(struct pci_dev *pdev, - int n_ports, unsigned long port_flags, - void __iomem *mmio_base) +static void sil_init_controller(struct ata_host *host) { + struct pci_dev *pdev = to_pci_dev(host->dev); + void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR]; u8 cls; u32 tmp; int i; @@ -578,31 +672,30 @@ static void sil_init_controller(struct pci_dev *pdev, if (cls) { cls >>= 3; cls++; /* cls = (line_size/8)+1 */ - for (i = 0; i < n_ports; i++) + for (i = 0; i < host->n_ports; i++) writew(cls << 8 | cls, mmio_base + sil_port[i].fifo_cfg); } else - dev_printk(KERN_WARNING, &pdev->dev, - "cache line size not set. Driver may not function\n"); + dev_warn(&pdev->dev, + "cache line size not set.
Driver may not function\n"); /* Apply R_ERR on DMA activate FIS errata workaround */ - if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) { + if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) { int cnt; - for (i = 0, cnt = 0; i < n_ports; i++) { + for (i = 0, cnt = 0; i < host->n_ports; i++) { tmp = readl(mmio_base + sil_port[i].sfis_cfg); if ((tmp & 0x3) != 0x01) continue; if (!cnt) - dev_printk(KERN_INFO, &pdev->dev, - "Applying R_ERR on DMA activate " - "FIS errata fix\n"); + dev_info(&pdev->dev, + "Applying R_ERR on DMA activate FIS errata fix\n"); writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg); cnt++; } } - if (n_ports == 4) { + if (host->n_ports == 4) { /* flip the magic "make 4 ports work" bit */ tmp = readl(mmio_base + sil_port[2].bmdma); if ((tmp & SIL_INTR_STEERING) == 0) @@ -611,118 +704,119 @@ static void sil_init_controller(struct pci_dev *pdev, } } -static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static bool sil_broken_system_poweroff(struct pci_dev *pdev) +{ + static const struct dmi_system_id broken_systems[] = { + { + .ident = "HP Compaq nx6325", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), + }, + /* PCI slot number of the controller */ + .driver_data = (void *)0x12UL, + }, + + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(broken_systems); + + if (dmi) { + unsigned long slot = (unsigned long)dmi->driver_data; + /* apply the quirk only to on-board controllers */ + return slot == PCI_SLOT(pdev->devfn); + } + + return false; +} + +static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - unsigned long base; + int board_id = ent->driver_data; + struct ata_port_info pi = sil_port_info[board_id]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct ata_host *host; void __iomem *mmio_base; - int rc; + int n_ports, rc; unsigned int i; - int pci_dev_busy = 0; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); + + /* allocate host */ + n_ports = 2; + if (board_id == sil_3114) + n_ports = 4; - rc = pci_enable_device(pdev); + if (sil_broken_system_poweroff(pdev)) { + pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN | + ATA_FLAG_NO_HIBERNATE_SPINDOWN; + dev_info(&pdev->dev, "quirky BIOS, skipping spindown " + "on poweroff and hibernation\n"); + } + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) + return -ENOMEM; + + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } + rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) - goto err_out_regions; + return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) - goto err_out_regions; - - probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + return rc; - INIT_LIST_HEAD(&probe_ent->node); - probe_ent->dev = pci_dev_to_dev(pdev); - probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; - probe_ent->sht = sil_port_info[ent->driver_data].sht; - probe_ent->n_ports = (ent->driver_data == sil_3114) ? 
4 : 2; - probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask; - probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask; - probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask; - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->port_flags = sil_port_info[ent->driver_data].flags; - - mmio_base = pci_iomap(pdev, 5, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; - } + mmio_base = host->iomap[SIL_MMIO_BAR]; - probe_ent->mmio_base = mmio_base; + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + struct ata_ioports *ioaddr = &ap->ioaddr; - base = (unsigned long) mmio_base; + ioaddr->cmd_addr = mmio_base + sil_port[i].tf; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = mmio_base + sil_port[i].ctl; + ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; + ioaddr->scr_addr = mmio_base + sil_port[i].scr; + ata_sff_std_ports(ioaddr); - for (i = 0; i < probe_ent->n_ports; i++) { - probe_ent->port[i].cmd_addr = base + sil_port[i].tf; - probe_ent->port[i].altstatus_addr = - probe_ent->port[i].ctl_addr = base + sil_port[i].ctl; - probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma; - probe_ent->port[i].scr_addr = base + sil_port[i].scr; - ata_std_ports(&probe_ent->port[i]); + ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf"); } - sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags, - mmio_base); + /* initialize and activate */ + sil_init_controller(host); pci_set_master(pdev); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); - - return 0; - -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; + return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED, + &sil_sht); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sil_pci_device_resume(struct pci_dev *pdev) { - struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct ata_host *host = pci_get_drvdata(pdev); + int rc; + + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; - ata_pci_device_do_resume(pdev); - sil_init_controller(pdev, host->n_ports, host->ports[0]->flags, - host->mmio_base); + sil_init_controller(host); ata_host_resume(host); return 0; } #endif -static int __init sil_init(void) -{ - return pci_register_driver(&sil_pci_driver); -} - -static void __exit sil_exit(void) -{ - pci_unregister_driver(&sil_pci_driver); -} - - -module_init(sil_init); -module_exit(sil_exit); +module_pci_driver(sil_pci_driver); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 39cb07baeba..0534890f118 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> @@ -28,10 +29,9 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> -#include <asm/io.h> #define DRV_NAME "sata_sil24" -#define DRV_VERSION "0.3" +#define DRV_VERSION "1.1" /* * Port request block (PRB) 32 bytes @@ -52,15 +52,26 @@ struct sil24_sge { __le32 flags; }; -/* - * Port multiplier - */ -struct sil24_port_multiplier { - __le32 diag; - __le32 sactive; -}; enum { + SIL24_HOST_BAR = 0, + SIL24_PORT_BAR = 2, + + /* sil24 fetches in chunks of 64bytes. The first block + * contains the PRB and two SGEs. 
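+ * (Worked numbers, assuming 4K pages: a PRB is 32 bytes and an SGE + * 16 bytes, so that first block is 32 + 2 * 16 = 64 bytes, exactly + * one fetch chunk, and the rest of the page then holds + * (4096 - 64) / 64 = 63 of the four-SGE blocks.)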
From the second block on, each + * consists of four SGEs and is called an SGT. Calculate the + * number of SGTs that fit into one page. + */ + SIL24_PRB_SZ = sizeof(struct sil24_prb) + + 2 * sizeof(struct sil24_sge), + SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ) + / (4 * sizeof(struct sil24_sge)), + + /* This will give us one unused SGE for ATA. This extra SGE + * will be used to store CDB for ATAPI devices. + */ + SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1, + /* * Global controller registers (128 bytes @ BAR0) */ @@ -100,10 +111,14 @@ */ PORT_REGS_SIZE = 0x2000, - PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */ + PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */ PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */ - PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ + PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */ + PORT_PMP_STATUS = 0x0000, /* port device status offset */ + PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */ + PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */ + /* 32 bit regs */ PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */ PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */ @@ -126,6 +141,7 @@ enum { PORT_PHY_CFG = 0x1050, PORT_SLOT_STAT = 0x1800, PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */ + PORT_CONTEXT = 0x1e04, PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */ PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */ PORT_SCONTROL = 0x1f00, @@ -139,9 +155,9 @@ enum { PORT_CS_INIT = (1 << 2), /* port initialize */ PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */ PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */ - PORT_CS_RESUME = (1 << 6), /* port resume */ + PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */ PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */ - PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */ + PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */ PORT_CS_RDY = (1 << 31), /* port ready to accept commands */ /* PORT_IRQ_STAT/ENABLE_SET/CLR */ @@ -161,7 +177,7 @@ enum { DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG | - PORT_IRQ_UNK_FIS, + PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY, /* bits[27:16] are unmasked (raw) */ PORT_IRQ_RAW_SHIFT = 16, @@ -228,9 +244,9 @@ enum { BID_SIL3131 = 2, /* host flags */ - SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | - ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY, + SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | + ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | + ATA_FLAG_AN | ATA_FLAG_PMP, SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ IRQ_STAT_4PORTS = 0xf, @@ -238,13 +254,13 @@ struct sil24_ata_block { struct sil24_prb prb; - struct sil24_sge sge[LIBATA_MAX_PRD]; + struct sil24_sge sge[SIL24_MAX_SGE]; }; struct sil24_atapi_block { struct sil24_prb prb; u8 cdb[16]; - struct sil24_sge sge[LIBATA_MAX_PRD - 1]; + struct sil24_sge sge[SIL24_MAX_SGE]; }; union sil24_cmd_block { @@ -252,55 +268,55 @@ struct sil24_atapi_block atapi; }; -static struct sil24_cerr_info { +static const struct sil24_cerr_info { unsigned int err_mask, action; const char *desc; } sil24_cerr_db[] = { - [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE, + [0] = { AC_ERR_DEV, 0, "device error" }, - [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE, + [PORT_CERR_DEV] = { AC_ERR_DEV, 0, "device error via D2H FIS" }, - [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE, + [PORT_CERR_SDB] = {
AC_ERR_DEV, 0, "device error via SDB FIS" }, - [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET, "error in data FIS" }, - [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET, "failed to transmit command FIS" }, - [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, "protocol mismatch" }, - [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, "data direction mismatch" }, - [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while writing" }, - [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while reading" }, - [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, "invalid data direction for ATAPI CDB" }, - [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET, - "SGT no on qword boundary" }, - [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, + "SGT not on qword boundary" }, + [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI target abort while fetching SGT" }, - [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI master abort while fetching SGT" }, - [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI parity error while fetching SGT" }, - [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET, + [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, "PRB not on qword boundary" }, - [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI target abort while fetching PRB" }, - [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI master abort while fetching PRB" }, - [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI parity error while fetching PRB" }, - [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "undefined error while transferring data" }, - [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI target abort while transferring data" }, - [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI master abort while transferring data" }, - [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET, + [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, "PCI parity error while transferring data" }, - [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET, + [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET, "FIS received while sending service FIS" }, }; @@ -313,42 +329,46 @@ struct sil24_port_priv { union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ dma_addr_t cmd_block_dma; /* DMA base addr for them */ - struct ata_taskfile tf; /* Cached taskfile registers */ -}; - -/* ap->host->private_data */ -struct sil24_host_priv { - void
__iomem *host_base; /* global controller control (128 bytes @BAR0) */ - void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */ + int do_port_rst; }; -static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev); -static u8 sil24_check_status(struct ata_port *ap); -static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); -static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); -static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); +static void sil24_dev_config(struct ata_device *dev); +static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); +static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); +static int sil24_qc_defer(struct ata_queued_cmd *qc); static void sil24_qc_prep(struct ata_queued_cmd *qc); static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); -static void sil24_irq_clear(struct ata_port *ap); -static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); +static void sil24_pmp_attach(struct ata_port *ap); +static void sil24_pmp_detach(struct ata_port *ap); static void sil24_freeze(struct ata_port *ap); static void sil24_thaw(struct ata_port *ap); +static int sil24_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int sil24_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); static void sil24_error_handler(struct ata_port *ap); static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); static int sil24_port_start(struct ata_port *ap); -static void sil24_port_stop(struct ata_port *ap); -static void sil24_host_stop(struct ata_host *host); static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sil24_pci_device_resume(struct pci_dev *pdev); #endif +#ifdef CONFIG_PM +static int sil24_port_resume(struct ata_port *ap); +#endif static const struct pci_device_id sil24_pci_tbl[] = { - { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, - { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, - { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, - { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, - { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, + { PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 }, + { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 }, + { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 }, + { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 }, + { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 }, + { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 }, + { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 }, + { } /* terminate list */ }; @@ -356,64 +376,53 @@ static struct pci_driver sil24_pci_driver = { .name = DRV_NAME, .id_table = sil24_pci_tbl, .probe = sil24_init_one, - .remove = ata_pci_remove_one, /* safe? 
*/ -#ifdef CONFIG_PM + .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = sil24_pci_device_resume, #endif }; static struct scsi_host_template sil24_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .change_queue_depth = ata_scsi_change_queue_depth, + ATA_NCQ_SHT(DRV_NAME), .can_queue = SIL24_MAX_CMDS, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, + .sg_tablesize = SIL24_MAX_SGE, .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, - .suspend = ata_scsi_device_suspend, - .resume = ata_scsi_device_resume, }; -static const struct ata_port_operations sil24_ops = { - .port_disable = ata_port_disable, - - .dev_config = sil24_dev_config, - - .check_status = sil24_check_status, - .check_altstatus = sil24_check_status, - .dev_select = ata_noop_dev_select, - - .tf_read = sil24_tf_read, +static struct ata_port_operations sil24_ops = { + .inherits = &sata_pmp_port_ops, + .qc_defer = sil24_qc_defer, .qc_prep = sil24_qc_prep, .qc_issue = sil24_qc_issue, - - .irq_handler = sil24_interrupt, - .irq_clear = sil24_irq_clear, - - .scr_read = sil24_scr_read, - .scr_write = sil24_scr_write, + .qc_fill_rtf = sil24_qc_fill_rtf, .freeze = sil24_freeze, .thaw = sil24_thaw, + .softreset = sil24_softreset, + .hardreset = sil24_hardreset, + .pmp_softreset = sil24_softreset, + .pmp_hardreset = sil24_pmp_hardreset, .error_handler = sil24_error_handler, .post_internal_cmd = sil24_post_internal_cmd, + .dev_config = sil24_dev_config, + + .scr_read = sil24_scr_read, + .scr_write = sil24_scr_write, + .pmp_attach = sil24_pmp_attach, + .pmp_detach = sil24_pmp_detach, .port_start = sil24_port_start, - .port_stop = sil24_port_stop, - .host_stop = sil24_host_stop, +#ifdef CONFIG_PM + .port_resume = sil24_port_resume, +#endif }; +static bool sata_sil24_msi; /* MSI is disabled by default */ +module_param_named(msi, sata_sil24_msi, bool, S_IRUGO); +MODULE_PARM_DESC(msi, "Enable MSI (Default: false)"); + /* * Use bits 30-31 of port_flags to encode available port numbers. * Current maximum is 4.
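* For example, SIL24_NPORTS2FLAG(4) stores (4 - 1) & 0x3 = 3 in * bits 30-31, and SIL24_FLAG2NPORTS() recovers 3 + 1 = 4 from them. */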
@@ -421,33 +430,30 @@ static const struct ata_port_operations sil24_ops = { #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) #define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1) -static struct ata_port_info sil24_port_info[] = { +static const struct ata_port_info sil24_port_info[] = { /* sil_3124 */ { - .sht = &sil24_sht, .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | SIL24_FLAG_PCIX_IRQ_WOC, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil24_ops, }, /* sil_3132 */ { - .sht = &sil24_sht, .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil24_ops, }, /* sil_3131/sil_3531 */ { - .sht = &sil24_sht, .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA5, .port_ops = &sil24_ops, }, }; @@ -459,9 +465,19 @@ static int sil24_tag(int tag) return tag; } -static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) +static unsigned long sil24_port_offset(struct ata_port *ap) +{ + return ap->port_no * PORT_REGS_SIZE; +} + +static void __iomem *sil24_port_base(struct ata_port *ap) +{ + return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap); +} + +static void sil24_dev_config(struct ata_device *dev) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(dev->link->ap); if (dev->cdb_len == 16) writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); @@ -469,21 +485,15 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); } -static inline void sil24_update_tf(struct ata_port *ap) +static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) { - struct sil24_port_priv *pp = ap->private_data; - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; - struct sil24_prb __iomem *prb = port; + void __iomem *port = sil24_port_base(ap); + struct sil24_prb __iomem *prb; u8 fis[6 * 4]; - memcpy_fromio(fis, prb->fis, 6 * 4); - ata_tf_from_fis(fis, &pp->tf); -} - -static u8 sil24_check_status(struct ata_port *ap) -{ - struct sil24_port_priv *pp = ap->private_data; - return pp->tf.command; + prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ; + memcpy_fromio(fis, prb->fis, sizeof(fis)); + ata_tf_from_fis(fis, tf); } static int sil24_scr_map[] = { @@ -493,138 +503,255 @@ static int sil24_scr_map[] = { [SCR_ACTIVE] = 3, }; -static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) +static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val) { - void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; + void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { - void __iomem *addr; - addr = scr_addr + sil24_scr_map[sc_reg] * 4; - return readl(scr_addr + sil24_scr_map[sc_reg] * 4); + *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4); + return 0; } - return 0xffffffffU; + return -EINVAL; } -static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) +static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, 
u32 val) { - void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; + void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { - void __iomem *addr; - addr = scr_addr + sil24_scr_map[sc_reg] * 4; writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); + return 0; } + return -EINVAL; } -static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +static void sil24_config_port(struct ata_port *ap) { - struct sil24_port_priv *pp = ap->private_data; - *tf = pp->tf; + void __iomem *port = sil24_port_base(ap); + + /* configure IRQ WoC */ + if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) + writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); + else + writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); + + /* zero error counters. */ + writew(0x8000, port + PORT_DECODE_ERR_THRESH); + writew(0x8000, port + PORT_CRC_ERR_THRESH); + writew(0x8000, port + PORT_HSHK_ERR_THRESH); + writew(0x0000, port + PORT_DECODE_ERR_CNT); + writew(0x0000, port + PORT_CRC_ERR_CNT); + writew(0x0000, port + PORT_HSHK_ERR_CNT); + + /* always use 64bit activation */ + writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); + + /* clear port multiplier enable and resume bits */ + writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); +} + +static void sil24_config_pmp(struct ata_port *ap, int attached) +{ + void __iomem *port = sil24_port_base(ap); + + if (attached) + writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT); + else + writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR); +} + +static void sil24_clear_pmp(struct ata_port *ap) +{ + void __iomem *port = sil24_port_base(ap); + int i; + + writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); + + for (i = 0; i < SATA_PMP_MAX_PORTS; i++) { + void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE; + + writel(0, pmp_base + PORT_PMP_STATUS); + writel(0, pmp_base + PORT_PMP_QACTIVE); + } } static int sil24_init_port(struct ata_port *ap) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); + struct sil24_port_priv *pp = ap->private_data; u32 tmp; + /* clear PMP error status */ + if (sata_pmp_attached(ap)) + sil24_clear_pmp(ap); + writel(PORT_CS_INIT, port + PORT_CTRL_STAT); - ata_wait_register(port + PORT_CTRL_STAT, + ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_INIT, PORT_CS_INIT, 10, 100); - tmp = ata_wait_register(port + PORT_CTRL_STAT, + tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, 10, 100); - if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) + if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { + pp->do_port_rst = 1; + ap->link.eh_context.i.action |= ATA_EH_RESET; return -EIO; + } + return 0; } -static int sil24_softreset(struct ata_port *ap, unsigned int *class) +static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, + const struct ata_taskfile *tf, + int is_cmd, u32 ctrl, + unsigned long timeout_msec) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); struct sil24_port_priv *pp = ap->private_data; struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; dma_addr_t paddr = pp->cmd_block_dma; - u32 mask, irq_stat; - const char *reason; - - DPRINTK("ENTER\n"); - - if (ata_port_offline(ap)) { - DPRINTK("PHY reports no device\n"); - *class = ATA_DEV_NONE; - goto out; - } + u32 irq_enabled, irq_mask, irq_stat; + int rc; - /* put the port into known state */ - if (sil24_init_port(ap)) { - reason ="port not ready"; - goto err; - } + prb->ctrl = cpu_to_le16(ctrl); + 
ata_tf_to_fis(tf, pmp, is_cmd, prb->fis); - /* do SRST */ - prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); - prb->fis[1] = 0; /* no PM yet */ + /* temporarily plug completion and error interrupts */ + irq_enabled = readl(port + PORT_IRQ_ENABLE_SET); + writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR); + /* + * The barrier is required to ensure that writes to cmd_block reach + * the memory before the write to PORT_CMD_ACTIVATE. + */ + wmb(); writel((u32)paddr, port + PORT_CMD_ACTIVATE); writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); - mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; - irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0, - 100, ATA_TMOUT_BOOT / HZ * 1000); + irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; + irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0, + 10, timeout_msec); - writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ + writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ irq_stat >>= PORT_IRQ_RAW_SHIFT; - if (!(irq_stat & PORT_IRQ_COMPLETE)) { + if (irq_stat & PORT_IRQ_COMPLETE) + rc = 0; + else { + /* force port into known state */ + sil24_init_port(ap); + if (irq_stat & PORT_IRQ_ERROR) - reason = "SRST command error"; + rc = -EIO; else - reason = "timeout"; + rc = -EBUSY; + } + + /* restore IRQ enabled */ + writel(irq_enabled, port + PORT_IRQ_ENABLE_SET); + + return rc; +} + +static int sil24_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + int pmp = sata_srst_pmp(link); + unsigned long timeout_msec = 0; + struct ata_taskfile tf; + const char *reason; + int rc; + + DPRINTK("ENTER\n"); + + /* put the port into known state */ + if (sil24_init_port(ap)) { + reason = "port not ready"; goto err; } - sil24_update_tf(ap); - *class = ata_dev_classify(&pp->tf); + /* do SRST */ + if (time_after(deadline, jiffies)) + timeout_msec = jiffies_to_msecs(deadline - jiffies); + + ata_tf_init(link->device, &tf); /* doesn't really matter */ + rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, + timeout_msec); + if (rc == -EBUSY) { + reason = "timeout"; + goto err; + } else if (rc) { + reason = "SRST command error"; + goto err; + } - if (*class == ATA_DEV_UNKNOWN) - *class = ATA_DEV_NONE; + sil24_read_tf(ap, 0, &tf); + *class = ata_dev_classify(&tf); - out: DPRINTK("EXIT, class=%u\n", *class); return 0; err: - ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); + ata_link_err(link, "softreset failed (%s)\n", reason); return -EIO; } -static int sil24_hardreset(struct ata_port *ap, unsigned int *class) +static int sil24_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + struct ata_port *ap = link->ap; + void __iomem *port = sil24_port_base(ap); + struct sil24_port_priv *pp = ap->private_data; + int did_port_rst = 0; const char *reason; int tout_msec, rc; u32 tmp; + retry: + /* Sometimes, DEV_RST is not enough to recover the controller. + * This happens often after PM DMA CS errata. 
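+ * (pp->do_port_rst gets set when port init fails, when a hardreset + * attempt below fails, and by the PMP DMA CS errata path in + * sil24_error_intr(); PORT_RST is the heavier recovery for those + * states.)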
+ */ + if (pp->do_port_rst) { + ata_port_warn(ap, + "controller in dubious state, performing PORT_RST\n"); + + writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT); + ata_msleep(ap, 10); + writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); + ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, + 10, 5000); + + /* restore port configuration */ + sil24_config_port(ap); + sil24_config_pmp(ap, ap->nr_pmp_links); + + pp->do_port_rst = 0; + did_port_rst = 1; + } + /* sil24 does the right thing(tm) without any protection */ - sata_set_spd(ap); + sata_set_spd(link); tout_msec = 100; - if (ata_port_online(ap)) + if (ata_link_online(link)) tout_msec = 5000; writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); - tmp = ata_wait_register(port + PORT_CTRL_STAT, - PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec); + tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, + PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, + tout_msec); /* SStatus oscillates between zero and valid status after * DEV_RST, debounce it. */ - rc = sata_phy_debounce(ap, sata_deb_timing_long); + rc = sata_link_debounce(link, sata_deb_timing_long, deadline); if (rc) { reason = "PHY debouncing failed"; goto err; } if (tmp & PORT_CS_DEV_RST) { - if (ata_port_offline(ap)) + if (ata_link_offline(link)) return 0; reason = "link not ready"; goto err; @@ -639,7 +766,12 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class) return -EAGAIN; err: - ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason); + if (!did_port_rst) { + pp->do_port_rst = 1; + goto retry; + } + + ata_link_err(link, "hardreset failed (%s)\n", reason); return -EIO; } @@ -647,19 +779,64 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc, struct sil24_sge *sge) { struct scatterlist *sg; - unsigned int idx = 0; + struct sil24_sge *last_sge = NULL; + unsigned int si; - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { sge->addr = cpu_to_le64(sg_dma_address(sg)); sge->cnt = cpu_to_le32(sg_dma_len(sg)); - if (ata_sg_is_last(sg, qc)) - sge->flags = cpu_to_le32(SGE_TRM); - else - sge->flags = 0; + sge->flags = 0; + last_sge = sge; sge++; - idx++; } + + last_sge->flags = cpu_to_le32(SGE_TRM); +} + +static int sil24_qc_defer(struct ata_queued_cmd *qc) +{ + struct ata_link *link = qc->dev->link; + struct ata_port *ap = link->ap; + u8 prot = qc->tf.protocol; + + /* + * There is a bug in the chip: + * Port LRAM Causes the PRB/SGT Data to be Corrupted + * If the host issues a read request for LRAM and SActive registers + * while active commands are available in the port, PRB/SGT data in + * the LRAM can become corrupted. This issue applies only when + * reading from, but not writing to, the LRAM. + * + * Therefore, reading LRAM when there is no particular error [and + * other commands may be outstanding] is prohibited. + * + * To avoid this bug there are two situations where a command must run + * exclusive of any other commands on the port: + * + * - ATAPI commands which check the sense data + * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF + * set. 
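+ * Both cases involve reading the result taskfile back out of LRAM + * (sil24_read_tf()), which is the access the errata above forbids + * while other commands are still in flight.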
+ * + */ + int is_excl = (ata_is_atapi(prot) || + (qc->flags & ATA_QCFLAG_RESULT_TF)); + + if (unlikely(ap->excl_link)) { + if (link == ap->excl_link) { + if (ap->nr_active_links) + return ATA_DEFER_PORT; + qc->flags |= ATA_QCFLAG_CLEAR_EXCL; + } else + return ATA_DEFER_PORT; + } else if (unlikely(is_excl)) { + ap->excl_link = link; + if (ap->nr_active_links) + return ATA_DEFER_PORT; + qc->flags |= ATA_QCFLAG_CLEAR_EXCL; + } + + return ata_std_qc_defer(qc); } static void sil24_qc_prep(struct ata_queued_cmd *qc) @@ -673,39 +850,36 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) cb = &pp->cmd_block[sil24_tag(qc->tag)]; - switch (qc->tf.protocol) { - case ATA_PROT_PIO: - case ATA_PROT_DMA: - case ATA_PROT_NCQ: - case ATA_PROT_NODATA: + if (!ata_is_atapi(qc->tf.protocol)) { prb = &cb->ata.prb; sge = cb->ata.sge; - break; - - case ATA_PROT_ATAPI: - case ATA_PROT_ATAPI_DMA: - case ATA_PROT_ATAPI_NODATA: + if (ata_is_data(qc->tf.protocol)) { + u16 prot = 0; + ctrl = PRB_CTRL_PROTOCOL; + if (ata_is_ncq(qc->tf.protocol)) + prot |= PRB_PROT_NCQ; + if (qc->tf.flags & ATA_TFLAG_WRITE) + prot |= PRB_PROT_WRITE; + else + prot |= PRB_PROT_READ; + prb->prot = cpu_to_le16(prot); + } + } else { prb = &cb->atapi.prb; sge = cb->atapi.sge; - memset(cb->atapi.cdb, 0, 32); + memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb)); memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len); - if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { + if (ata_is_data(qc->tf.protocol)) { if (qc->tf.flags & ATA_TFLAG_WRITE) ctrl = PRB_CTRL_PACKET_WRITE; else ctrl = PRB_CTRL_PACKET_READ; } - break; - - default: - prb = NULL; /* shut up, gcc */ - sge = NULL; - BUG(); } prb->ctrl = cpu_to_le16(ctrl); - ata_tf_to_fis(&qc->tf, prb->fis, 0); + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis); if (qc->flags & ATA_QCFLAG_DMAMAP) sil24_fill_sg(qc, sge); @@ -715,7 +889,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sil24_port_priv *pp = ap->private_data; - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); unsigned int tag = sil24_tag(qc->tag); dma_addr_t paddr; void __iomem *activate; @@ -723,20 +897,63 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block); activate = port + PORT_CMD_ACTIVATE + tag * 8; + /* + * The barrier is required to ensure that writes to cmd_block reach + * the memory before the write to PORT_CMD_ACTIVATE. 
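+ * Without the barrier the controller could fetch a stale PRB/SGT; + * the MMIO write that activates the slot is not ordered against + * prior memory writes on all architectures.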
+ */ + wmb(); writel((u32)paddr, activate); writel((u64)paddr >> 32, activate + 4); return 0; } -static void sil24_irq_clear(struct ata_port *ap) +static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + sil24_read_tf(qc->ap, qc->tag, &qc->result_tf); + return true; +} + +static void sil24_pmp_attach(struct ata_port *ap) +{ + u32 *gscr = ap->link.device->gscr; + + sil24_config_pmp(ap, 1); + sil24_init_port(ap); + + if (sata_pmp_gscr_vendor(gscr) == 0x11ab && + sata_pmp_gscr_devid(gscr) == 0x4140) { + ata_port_info(ap, + "disabling NCQ support due to sil24-mv4140 quirk\n"); + ap->flags &= ~ATA_FLAG_NCQ; + } +} + +static void sil24_pmp_detach(struct ata_port *ap) { - /* unused */ + sil24_init_port(ap); + sil24_config_pmp(ap, 0); + + ap->flags |= ATA_FLAG_NCQ; +} + +static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sil24_init_port(link->ap); + if (rc) { + ata_link_err(link, "hardreset failed (port not ready)\n"); + return rc; + } + + return sata_std_hardreset(link, class, deadline); } static void sil24_freeze(struct ata_port *ap) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear * PORT_IRQ_ENABLE instead. @@ -746,7 +963,7 @@ static void sil24_freeze(struct ata_port *ap) static void sil24_thaw(struct ata_port *ap) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); u32 tmp; /* clear IRQ */ @@ -759,9 +976,12 @@ static void sil24_thaw(struct ata_port *ap) static void sil24_error_intr(struct ata_port *ap) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; - struct ata_eh_info *ehi = &ap->eh_info; - int freeze = 0; + void __iomem *port = sil24_port_base(ap); + struct sil24_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc = NULL; + struct ata_link *link; + struct ata_eh_info *ehi; + int abort = 0, freeze = 0; u32 irq_stat; /* on error, we need to clear IRQ explicitly */ @@ -769,31 +989,74 @@ static void sil24_error_intr(struct ata_port *ap) writel(irq_stat, port + PORT_IRQ_STAT); /* first, analyze and record host port events */ + link = &ap->link; + ehi = &link->eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); + if (irq_stat & PORT_IRQ_SDB_NOTIFY) { + ata_ehi_push_desc(ehi, "SDB notify"); + sata_async_notification(ap); + } + if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ", %s", - irq_stat & PORT_IRQ_PHYRDY_CHG ? - "PHY RDY changed" : "device exchanged"); + ata_ehi_push_desc(ehi, "%s", + irq_stat & PORT_IRQ_PHYRDY_CHG ? + "PHY RDY changed" : "device exchanged"); freeze = 1; } if (irq_stat & PORT_IRQ_UNK_FIS) { ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi , ", unknown FIS"); + ehi->action |= ATA_EH_RESET; + ata_ehi_push_desc(ehi, "unknown FIS"); freeze = 1; } /* deal with command error */ if (irq_stat & PORT_IRQ_ERROR) { - struct sil24_cerr_info *ci = NULL; + const struct sil24_cerr_info *ci = NULL; unsigned int err_mask = 0, action = 0; - struct ata_queued_cmd *qc; - u32 cerr; + u32 context, cerr; + int pmp; + + abort = 1; + + /* DMA Context Switch Failure in Port Multiplier Mode + * errata. If we have active commands to 3 or more + * devices, any error condition on active devices can + * corrupt DMA context switching. 
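+ * When that window is hit, precise recovery is not attempted: the + * code below fails everything with AC_ERR_OTHER, freezes the port + * and flags a full PORT_RST for the next reset.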
+ */ + if (ap->nr_active_links >= 3) { + ehi->err_mask |= AC_ERR_OTHER; + ehi->action |= ATA_EH_RESET; + ata_ehi_push_desc(ehi, "PMP DMA CS errata"); + pp->do_port_rst = 1; + freeze = 1; + } + + /* find out the offending link and qc */ + if (sata_pmp_attached(ap)) { + context = readl(port + PORT_CONTEXT); + pmp = (context >> 5) & 0xf; + + if (pmp < ap->nr_pmp_links) { + link = &ap->pmp_link[pmp]; + ehi = &link->eh_info; + qc = ata_qc_from_tag(ap, link->active_tag); + + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat 0x%08x", + irq_stat); + } else { + err_mask |= AC_ERR_HSM; + action |= ATA_EH_RESET; + freeze = 1; + } + } else + qc = ata_qc_from_tag(ap, link->active_tag); /* analyze CMD_ERR */ cerr = readl(port + PORT_CMD_ERR); @@ -803,44 +1066,57 @@ static void sil24_error_intr(struct ata_port *ap) if (ci && ci->desc) { err_mask |= ci->err_mask; action |= ci->action; - ata_ehi_push_desc(ehi, ", %s", ci->desc); + if (action & ATA_EH_RESET) + freeze = 1; + ata_ehi_push_desc(ehi, "%s", ci->desc); } else { err_mask |= AC_ERR_OTHER; - action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", unknown command error %d", + action |= ATA_EH_RESET; + freeze = 1; + ata_ehi_push_desc(ehi, "unknown command error %d", cerr); } /* record error info */ - qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc) { - sil24_update_tf(ap); + if (qc) qc->err_mask |= err_mask; - } else + else ehi->err_mask |= err_mask; ehi->action |= action; + + /* if PMP, resume */ + if (sata_pmp_attached(ap)) + writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT); } /* freeze or abort */ if (freeze) ata_port_freeze(ap); - else - ata_port_abort(ap); -} - -static void sil24_finish_qc(struct ata_queued_cmd *qc) -{ - if (qc->flags & ATA_QCFLAG_RESULT_TF) - sil24_update_tf(qc->ap); + else if (abort) { + if (qc) + ata_link_abort(qc->dev->link); + else + ata_port_abort(ap); + } } static inline void sil24_host_intr(struct ata_port *ap) { - void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; + void __iomem *port = sil24_port_base(ap); u32 slot_stat, qc_active; int rc; + /* If PCIX_IRQ_WOC, there's an inherent race window between + * clearing IRQ pending status and reading PORT_SLOT_STAT + * which may cause spurious interrupts afterwards. This is + * unavoidable and much better than losing interrupts which + * happens if IRQ pending is cleared after reading + * PORT_SLOT_STAT. 
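+ * For the same reason the spurious-interrupt message at the bottom + * of this function is suppressed on PCIX_IRQ_WOC controllers.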
+ */ + if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) + writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); + slot_stat = readl(port + PORT_SLOT_STAT); if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { @@ -848,36 +1124,34 @@ static inline void sil24_host_intr(struct ata_port *ap) return; } - if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) - writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); - qc_active = slot_stat & ~HOST_SSTAT_ATTN; - rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); + rc = ata_qc_complete_multiple(ap, qc_active); if (rc > 0) return; if (rc < 0) { - struct ata_eh_info *ehi = &ap->eh_info; + struct ata_eh_info *ehi = &ap->link.eh_info; ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_SOFTRESET; + ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); return; } - if (ata_ratelimit()) - ata_port_printk(ap, KERN_INFO, "spurious interrupt " - "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", - slot_stat, ap->active_tag, ap->sactive); + /* spurious interrupts are expected if PCIX_IRQ_WOC */ + if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) + ata_port_info(ap, + "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n", + slot_stat, ap->link.active_tag, ap->link.sactive); } -static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +static irqreturn_t sil24_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; - struct sil24_host_priv *hpriv = host->private_data; + void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; unsigned handled = 0; u32 status; int i; - status = readl(hpriv->host_base + HOST_IRQ_STAT); + status = readl(host_base + HOST_IRQ_STAT); if (status == 0xffffffff) { printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, " @@ -892,13 +1166,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs * for (i = 0; i < host->n_ports; i++) if (status & (1 << i)) { - struct ata_port *ap = host->ports[i]; - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { - sil24_host_intr(host->ports[i]); - handled++; - } else - printk(KERN_ERR DRV_NAME - ": interrupt from disabled port %d\n", i); + sil24_host_intr(host->ports[i]); + handled++; } spin_unlock(&host->lock); @@ -908,35 +1177,23 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs * static void sil24_error_handler(struct ata_port *ap) { - struct ata_eh_context *ehc = &ap->eh_context; + struct sil24_port_priv *pp = ap->private_data; - if (sil24_init_port(ap)) { + if (sil24_init_port(ap)) ata_eh_freeze_port(ap); - ehc->i.action |= ATA_EH_HARDRESET; - } - /* perform recovery */ - ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset, - ata_std_postreset); + sata_pmp_error_handler(ap); + + pp->do_port_rst = 0; } static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - if (qc->flags & ATA_QCFLAG_FAILED) - qc->err_mask |= AC_ERR_OTHER; - /* make DMA engine forget about the failed command */ - if (qc->err_mask) - sil24_init_port(ap); -} - -static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) -{ - const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS; - - dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); + if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap)) + ata_eh_freeze_port(ap); } static int sil24_port_start(struct ata_port *ap) @@ -946,63 +1203,30 @@ static int sil24_port_start(struct ata_port *ap) union sil24_cmd_block *cb; size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; dma_addr_t cb_dma; - int rc = 
-ENOMEM; - pp = kzalloc(sizeof(*pp), GFP_KERNEL); + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) - goto err_out; - - pp->tf.command = ATA_DRDY; + return -ENOMEM; - cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); + cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); if (!cb) - goto err_out_pp; + return -ENOMEM; memset(cb, 0, cb_size); - rc = ata_pad_alloc(ap, dev); - if (rc) - goto err_out_pad; - pp->cmd_block = cb; pp->cmd_block_dma = cb_dma; ap->private_data = pp; - return 0; - -err_out_pad: - sil24_cblk_free(pp, dev); -err_out_pp: - kfree(pp); -err_out: - return rc; -} - -static void sil24_port_stop(struct ata_port *ap) -{ - struct device *dev = ap->host->dev; - struct sil24_port_priv *pp = ap->private_data; - - sil24_cblk_free(pp, dev); - ata_pad_free(ap, dev); - kfree(pp); -} - -static void sil24_host_stop(struct ata_host *host) -{ - struct sil24_host_priv *hpriv = host->private_data; - struct pci_dev *pdev = to_pci_dev(host->dev); + ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); + ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port"); - pci_iounmap(pdev, hpriv->host_base); - pci_iounmap(pdev, hpriv->port_base); - kfree(hpriv); + return 0; } -static void sil24_init_controller(struct pci_dev *pdev, int n_ports, - unsigned long port_flags, - void __iomem *host_base, - void __iomem *port_base) +static void sil24_init_controller(struct ata_host *host) { + void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; u32 tmp; int i; @@ -1013,8 +1237,10 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports, writel(0, host_base + HOST_CTRL); /* init ports */ - for (i = 0; i < n_ports; i++) { - void __iomem *port = port_base + i * PORT_REGS_SIZE; + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *port = sil24_port_base(ap); + /* Initial PHY setting */ writel(0x20c, port + PORT_PHY_CFG); @@ -1023,33 +1249,16 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports, tmp = readl(port + PORT_CTRL_STAT); if (tmp & PORT_CS_PORT_RST) { writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); - tmp = ata_wait_register(port + PORT_CTRL_STAT, + tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT, PORT_CS_PORT_RST, PORT_CS_PORT_RST, 10, 100); if (tmp & PORT_CS_PORT_RST) - dev_printk(KERN_ERR, &pdev->dev, - "failed to clear port RST\n"); + dev_err(host->dev, + "failed to clear port RST\n"); } - /* Configure IRQ WoC */ - if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC) - writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); - else - writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); - - /* Zero error counters. 
*/ - writel(0x8000, port + PORT_DECODE_ERR_THRESH); - writel(0x8000, port + PORT_CRC_ERR_THRESH); - writel(0x8000, port + PORT_HSHK_ERR_THRESH); - writel(0x0000, port + PORT_DECODE_ERR_CNT); - writel(0x0000, port + PORT_CRC_ERR_CNT); - writel(0x0000, port + PORT_HSHK_ERR_CNT); - - /* Always use 64bit activation */ - writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); - - /* Clear port multiplier enable and resume bits */ - writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); + /* configure port */ + sil24_config_port(ap); } /* Turn on interrupts */ @@ -1058,149 +1267,106 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports, static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version = 0; - unsigned int board_id = (unsigned int)ent->driver_data; - struct ata_port_info *pinfo = &sil24_port_info[board_id]; - struct ata_probe_ent *probe_ent = NULL; - struct sil24_host_priv *hpriv = NULL; - void __iomem *host_base = NULL; - void __iomem *port_base = NULL; - int i, rc; + extern int __MARKER__sil24_cmd_block_is_sized_wrongly; + struct ata_port_info pi = sil24_port_info[ent->driver_data]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + void __iomem * const *iomap; + struct ata_host *host; + int rc; u32 tmp; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + /* cause link error if sil24_cmd_block is sized wrongly */ + if (sizeof(union sil24_cmd_block) != PAGE_SIZE) + __MARKER__sil24_cmd_block_is_sized_wrongly = 1; + + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); + /* acquire resources */ + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); + rc = pcim_iomap_regions(pdev, + (1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR), + DRV_NAME); if (rc) - goto out_disable; - - rc = -ENOMEM; - /* map mmio registers */ - host_base = pci_iomap(pdev, 0, 0); - if (!host_base) - goto out_free; - port_base = pci_iomap(pdev, 2, 0); - if (!port_base) - goto out_free; - - /* allocate & init probe_ent and hpriv */ - probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); - if (!probe_ent) - goto out_free; - - hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) - goto out_free; - - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); - - probe_ent->sht = pinfo->sht; - probe_ent->port_flags = pinfo->flags; - probe_ent->pio_mask = pinfo->pio_mask; - probe_ent->mwdma_mask = pinfo->mwdma_mask; - probe_ent->udma_mask = pinfo->udma_mask; - probe_ent->port_ops = pinfo->port_ops; - probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags); - - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->private_data = hpriv; - - hpriv->host_base = host_base; - hpriv->port_base = port_base; + return rc; + iomap = pcim_iomap_table(pdev); - /* - * Configure the device - */ - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + /* apply workaround for completion IRQ loss on PCI-X errata */ + if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) { + tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL); + if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL)) + dev_info(&pdev->dev, + "Applying completion IRQ loss on PCI-X errata fix\n"); + else + pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; + } + + /* allocate and fill host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, + SIL24_FLAG2NPORTS(ppi[0]->flags)); + if (!host) + return -ENOMEM; + host->iomap = 
iomap; + + /* configure and activate the device */ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - goto out_free; + dev_err(&pdev->dev, + "64-bit DMA enable failed\n"); + return rc; } } } else { - rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - goto out_free; + dev_err(&pdev->dev, "32-bit DMA enable failed\n"); + return rc; } - rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - goto out_free; + dev_err(&pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; } } - /* Apply workaround for completion IRQ loss on PCI-X errata */ - if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) { - tmp = readl(host_base + HOST_CTRL); - if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL)) - dev_printk(KERN_INFO, &pdev->dev, - "Applying completion IRQ loss on PCI-X " - "errata fix\n"); - else - probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; - } - - for (i = 0; i < probe_ent->n_ports; i++) { - unsigned long portu = - (unsigned long)port_base + i * PORT_REGS_SIZE; + /* Set max read request size to 4096. This slightly increases + * write throughput for pci-e variants. + */ + pcie_set_readrq(pdev, 4096); - probe_ent->port[i].cmd_addr = portu; - probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; + sil24_init_controller(host); - ata_std_ports(&probe_ent->port[i]); + if (sata_sil24_msi && !pci_enable_msi(pdev)) { + dev_info(&pdev->dev, "Using MSI\n"); + pci_intx(pdev, 0); } - sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags, - host_base, port_base); - pci_set_master(pdev); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - - kfree(probe_ent); - return 0; - - out_free: - if (host_base) - pci_iounmap(pdev, host_base); - if (port_base) - pci_iounmap(pdev, port_base); - kfree(probe_ent); - kfree(hpriv); - pci_release_regions(pdev); - out_disable: - pci_disable_device(pdev); - return rc; + return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED, + &sil24_sht); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sil24_pci_device_resume(struct pci_dev *pdev) { - struct ata_host *host = dev_get_drvdata(&pdev->dev); - struct sil24_host_priv *hpriv = host->private_data; + struct ata_host *host = pci_get_drvdata(pdev); + void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; + int rc; - ata_pci_device_do_resume(pdev); + rc = ata_pci_device_do_resume(pdev); + if (rc) + return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) - writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); + writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL); - sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags, - hpriv->host_base, hpriv->port_base); + sil24_init_controller(host); ata_host_resume(host); @@ -1208,20 +1374,17 @@ static int sil24_pci_device_resume(struct pci_dev *pdev) } #endif -static int __init sil24_init(void) +#ifdef CONFIG_PM +static int sil24_port_resume(struct ata_port *ap) { - return pci_register_driver(&sil24_pci_driver); + sil24_config_pmp(ap, 
ap->nr_pmp_links); + return 0; } +#endif -static void __exit sil24_exit(void) -{ - pci_unregister_driver(&sil24_pci_driver); -} +module_pci_driver(sil24_pci_driver); MODULE_AUTHOR("Tejun Heo"); MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sil24_pci_tbl); - -module_init(sil24_init); -module_exit(sil24_exit); diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 18d49fff8dc..d1637ac40a7 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -33,16 +33,16 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include "sis.h" #define DRV_NAME "sata_sis" -#define DRV_VERSION "0.6" +#define DRV_VERSION "1.0" enum { sis_180 = 0, @@ -62,286 +62,253 @@ enum { GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */ }; -static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static const struct pci_device_id sis_pci_tbl[] = { - { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, - { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, - { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, + { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ + { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */ + { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */ + { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */ + { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */ + { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */ + { } /* terminate list */ }; - static struct pci_driver sis_pci_driver = { .name = DRV_NAME, .id_table = sis_pci_tbl, .probe = sis_init_one, .remove = ata_pci_remove_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif }; static struct scsi_host_template sis_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = ATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations sis_ops = { - .port_disable = ata_port_disable, - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = 
ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +static struct ata_port_operations sis_ops = { + .inherits = &ata_bmdma_port_ops, .scr_read = sis_scr_read, .scr_write = sis_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, }; -static struct ata_port_info sis_port_info = { - .sht = &sis_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, - .pio_mask = 0x1f, - .mwdma_mask = 0x7, - .udma_mask = 0x7f, +static const struct ata_port_info sis_port_info = { + .flags = ATA_FLAG_SATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &sis_ops, }; - MODULE_AUTHOR("Uwe Koziolek"); -MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); +MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sis_pci_tbl); MODULE_VERSION(DRV_VERSION); -static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device) +static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg) { + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned int addr = SIS_SCR_BASE + (4 * sc_reg); + u8 pmr; - if (port_no) { - if (device == 0x182) + if (ap->port_no) { + switch (pdev->device) { + case 0x0180: + case 0x0181: + pci_read_config_byte(pdev, SIS_PMR, &pmr); + if ((pmr & SIS_PMR_COMBINED) == 0) + addr += SIS180_SATA1_OFS; + break; + + case 0x0182: + case 0x0183: + case 0x1182: addr += SIS182_SATA1_OFS; - else - addr += SIS180_SATA1_OFS; + break; + } } + if (link->pmp) + addr += 0x10; return addr; } -static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) +static u32 sis_scr_cfg_read(struct ata_link *link, + unsigned int sc_reg, u32 *val) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); - u32 val, val2 = 0; - u8 pmr; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg); if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ - return 0xffffffff; - - pci_read_config_byte(pdev, SIS_PMR, &pmr); - - pci_read_config_dword(pdev, cfg_addr, &val); + return -EINVAL; - if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) - pci_read_config_dword(pdev, cfg_addr+0x10, &val2); - - return val|val2; + pci_read_config_dword(pdev, cfg_addr, val); + return 0; } -static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) +static int sis_scr_cfg_write(struct ata_link *link, + unsigned int sc_reg, u32 val) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device); - u8 pmr; - - if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */ - return; - - pci_read_config_byte(pdev, SIS_PMR, &pmr); + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg); pci_write_config_dword(pdev, cfg_addr, val); - - if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) - pci_write_config_dword(pdev, cfg_addr+0x10, val); + return 0; } -static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 val, val2 = 0; - u8 pmr; + struct ata_port 
*ap = link->ap; + void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; if (ap->flags & SIS_FLAG_CFGSCR) - return sis_scr_cfg_read(ap, sc_reg); + return sis_scr_cfg_read(link, sc_reg, val); - pci_read_config_byte(pdev, SIS_PMR, &pmr); - - val = inl(ap->ioaddr.scr_addr + (sc_reg * 4)); - - if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) - val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); - - return val | val2; + *val = ioread32(base + sc_reg * 4); + return 0; } -static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u8 pmr; + struct ata_port *ap = link->ap; + void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10; if (sc_reg > SCR_CONTROL) - return; - - pci_read_config_byte(pdev, SIS_PMR, &pmr); + return -EINVAL; if (ap->flags & SIS_FLAG_CFGSCR) - sis_scr_cfg_write(ap, sc_reg, val); - else { - outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); - if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) - outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); - } + return sis_scr_cfg_write(link, sc_reg, val); + + iowrite32(val, base + (sc_reg * 4)); + return 0; } -static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - int rc; - u32 genctl; - struct ata_port_info *ppi[2]; - int pci_dev_busy = 0; + struct ata_port_info pi = sis_port_info; + const struct ata_port_info *ppi[] = { &pi, &pi }; + struct ata_host *host; + u32 genctl, val; u8 pmr; - u8 port2_start; + u8 port2_start = 0x20; + int i, rc; - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } - - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - - ppi[0] = ppi[1] = &sis_port_info; - probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); - if (!probe_ent) { - rc = -ENOMEM; - goto err_out_regions; - } - /* check and see if the SCRs are in IO space or PCI cfg space */ pci_read_config_dword(pdev, SIS_GENCTL, &genctl); if ((genctl & GENCTL_IOMAPPED_SCR) == 0) - probe_ent->port_flags |= SIS_FLAG_CFGSCR; + pi.flags |= SIS_FLAG_CFGSCR; /* if hardware thinks SCRs are in IO space, but there are * no IO resources assigned, change to PCI cfg space. 
*/ - if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) && + if ((!(pi.flags & SIS_FLAG_CFGSCR)) && ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { genctl &= ~GENCTL_IOMAPPED_SCR; pci_write_config_dword(pdev, SIS_GENCTL, genctl); - probe_ent->port_flags |= SIS_FLAG_CFGSCR; + pi.flags |= SIS_FLAG_CFGSCR; } pci_read_config_byte(pdev, SIS_PMR, &pmr); - if (ent->device != 0x182) { + switch (ent->device) { + case 0x0180: + case 0x0181: + + /* The PATA-handling is provided by pata_sis */ + switch (pmr & 0x30) { + case 0x10: + ppi[1] = &sis_info133_for_sata; + break; + + case 0x30: + ppi[0] = &sis_info133_for_sata; + break; + } if ((pmr & SIS_PMR_COMBINED) == 0) { - dev_printk(KERN_INFO, &pdev->dev, - "Detected SiS 180/181 chipset in SATA mode\n"); + dev_info(&pdev->dev, + "Detected SiS 180/181/964 chipset in SATA mode\n"); port2_start = 64; + } else { + dev_info(&pdev->dev, + "Detected SiS 180/181 chipset in combined mode\n"); + port2_start = 0; + pi.flags |= ATA_FLAG_SLAVE_POSS; } - else { - dev_printk(KERN_INFO, &pdev->dev, - "Detected SiS 180/181 chipset in combined mode\n"); - port2_start=0; + break; + + case 0x0182: + case 0x0183: + pci_read_config_dword(pdev, 0x6C, &val); + if (val & (1L << 31)) { + dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n"); + pi.flags |= ATA_FLAG_SLAVE_POSS; + } else { + dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n"); } - } - else { - dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n"); - port2_start = 0x20; - } - - if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) { - probe_ent->port[0].scr_addr = - pci_resource_start(pdev, SIS_SCR_PCI_BAR); - probe_ent->port[1].scr_addr = - pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start; + break; + + case 0x1182: + dev_info(&pdev->dev, + "Detected SiS 1182/966/680 SATA controller\n"); + pi.flags |= ATA_FLAG_SLAVE_POSS; + break; + + case 0x1183: + dev_info(&pdev->dev, + "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n"); + ppi[0] = &sis_info133_for_sata; + ppi[1] = &sis_info133_for_sata; + break; } - pci_set_master(pdev); - pci_intx(pdev, 1); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + return rc; - return 0; + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; -err_out_regions: - pci_release_regions(pdev); + if (ap->flags & ATA_FLAG_SATA && + ap->flags & ATA_FLAG_SLAVE_POSS) { + rc = ata_slave_link_init(ap); + if (rc) + return rc; + } + } -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; + if (!(pi.flags & SIS_FLAG_CFGSCR)) { + void __iomem *mmio; -} + rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME); + if (rc) + return rc; + mmio = host->iomap[SIS_SCR_PCI_BAR]; -static int __init sis_init(void) -{ - return pci_register_driver(&sis_pci_driver); -} + host->ports[0]->ioaddr.scr_addr = mmio; + host->ports[1]->ioaddr.scr_addr = mmio + port2_start; + } -static void __exit sis_exit(void) -{ - pci_unregister_driver(&sis_pci_driver); + pci_set_master(pdev); + pci_intx(pdev, 1); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &sis_sht); } -module_init(sis_init); -module_exit(sis_exit); - +module_pci_driver(sis_pci_driver); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index d6d6658d832..c630fa81262 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -39,12 +39,13 @@ #include <linux/kernel.h> #include 
<linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi.h> #include <linux/libata.h> #ifdef CONFIG_PPC_OF @@ -53,9 +54,14 @@ #endif /* CONFIG_PPC_OF */ #define DRV_NAME "sata_svw" -#define DRV_VERSION "2.0" +#define DRV_VERSION "2.3" enum { + /* ap->flags bits */ + K2_FLAG_SATA_8_PORTS = (1 << 24), + K2_FLAG_NO_ATAPI_DMA = (1 << 25), + K2_FLAG_BAR_POS_3 = (1 << 26), + /* Taskfile registers offsets */ K2_SATA_TF_CMD_OFFSET = 0x00, K2_SATA_TF_DATA_OFFSET = 0x00, @@ -83,27 +89,91 @@ enum { /* Port stride */ K2_SATA_PORT_OFFSET = 0x100, + + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, /* bar 3 */ + chip_svw43 = 3, /* bar 5 */ }; static u8 k2_stat_check_status(struct ata_port *ap); -static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) +{ + u8 cmnd = qc->scsicmd->cmnd[0]; + + if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) + return -1; /* ATAPI DMA not supported */ + else { + switch (cmnd) { + case READ_10: + case READ_12: + case READ_16: + case WRITE_10: + case WRITE_12: + case WRITE_16: + return 0; + + default: + return -1; + } + + } +} + +static int k2_sata_scr_read(struct ata_link *link, + unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) +static int k2_sata_scr_write(struct ata_link *link, + unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; - writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; +} + +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); } +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -116,11 +186,16 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { - writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr); - writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); - writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); - writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); - writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); + writew(tf->feature | (((u16)tf->hob_feature) << 8), + ioaddr->feature_addr); + writew(tf->nsect | (((u16)tf->hob_nsect) << 8), + ioaddr->nsect_addr); + writew(tf->lbal | 
(((u16)tf->hob_lbal) << 8), + ioaddr->lbal_addr); + writew(tf->lbam | (((u16)tf->hob_lbam) << 8), + ioaddr->lbam_addr); + writew(tf->lbah | (((u16)tf->hob_lbah) << 8), + ioaddr->lbah_addr); } else if (is_addr) { writew(tf->feature, ioaddr->feature_addr); writew(tf->nsect, ioaddr->nsect_addr); @@ -161,7 +236,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; tf->hob_lbah = lbah >> 8; - } + } } /** @@ -172,15 +247,16 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host lock) */ -static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) +static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u8 dmactl; - void *mmio = (void *) ap->ioaddr.bmdma_addr; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + /* load PRD table addr. */ mb(); /* make sure PRD table writes are visible to controller */ - writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); + writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS); /* specify data direction, triple-check start bit is clear */ dmactl = readb(mmio + ATA_DMA_CMD); @@ -191,7 +267,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) /* issue r/w command if this is not a ATA DMA command*/ if (qc->tf.protocol != ATA_PROT_DMA) - ap->ops->exec_command(ap, &qc->tf); + ap->ops->sff_exec_command(ap, &qc->tf); } /** @@ -202,58 +278,54 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) * spin_lock_irqsave(host lock) */ -static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc) +static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - void *mmio = (void *) ap->ioaddr.bmdma_addr; + void __iomem *mmio = ap->ioaddr.bmdma_addr; u8 dmactl; /* start host DMA transaction */ dmactl = readb(mmio + ATA_DMA_CMD); writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); - /* There is a race condition in certain SATA controllers that can - be seen when the r/w command is given to the controller before the - host DMA is started. On a Read command, the controller would initiate - the command to the drive even before it sees the DMA start. When there - are very fast drives connected to the controller, or when the data request - hits in the drive cache, there is the possibility that the drive returns a part - or all of the requested data to the controller before the DMA start is issued. - In this case, the controller would become confused as to what to do with the data. - In the worst case when all the data is returned back to the controller, the - controller could hang. In other cases it could return partial data returning - in data corruption. This problem has been seen in PPC systems and can also appear - on an system with very fast disks, where the SATA controller is sitting behind a - number of bridges, and hence there is significant latency between the r/w command - and the start command. */ - /* issue r/w command if the access is to ATA*/ + /* This works around possible data corruption. + + On certain SATA controllers, this can be seen when the r/w + command is given to the controller before the host DMA is + started. 
When there are very fast drives connected to the + controller, or when the data request hits in the drive + cache, there is the possibility that the drive returns a + part or all of the requested data to the controller before + the DMA start is issued. In this case, the controller + would become confused as to what to do with the data. In + the worst case when all the data is returned back to the + controller, the controller could hang. In other cases it + could return partial data, resulting in data + corruption. This problem has been seen in PPC systems and + can also appear on a system with very fast disks, where + the SATA controller is sitting behind a number of bridges, + and hence there is significant latency between the r/w + command and the start command. */ + /* issue r/w command if the access is to ATA */ if (qc->tf.protocol == ATA_PROT_DMA) - ap->ops->exec_command(ap, &qc->tf); + ap->ops->sff_exec_command(ap, &qc->tf); } static u8 k2_stat_check_status(struct ata_port *ap) { - return readl((void *) ap->ioaddr.status_addr); + return readl(ap->ioaddr.status_addr); } #ifdef CONFIG_PPC_OF -/* - * k2_sata_proc_info - * inout : decides on the direction of the dataflow and the meaning of the - * variables - * buffer: If inout==FALSE data is being written to it else read from it - * *start: If inout==FALSE start of the valid data in the buffer - * offset: If inout==FALSE offset from the beginning of the imaginary file - * from which we start writing into the buffer - * length: If inout==FALSE max number of bytes to be written into the buffer - * else number of bytes in the buffer - */ -static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start, - off_t offset, int count, int inout) +static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct ata_port *ap; struct device_node *np; - int len, index; + int index; /* Find the ata_port */ ap = ata_shost_to_port(shost); @@ -268,72 +340,78 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start, /* Match it to a port node */ index = (ap == ap->host->ports[0]) ? 
0 : 1; for (np = np->child; np != NULL; np = np->sibling) { - const u32 *reg = get_property(np, "reg", NULL); + const u32 *reg = of_get_property(np, "reg", NULL); if (!reg) continue; - if (index == *reg) + if (index == *reg) { + seq_printf(m, "devspec: %s\n", np->full_name); break; + } } - if (np == NULL) - return 0; - - len = sprintf(page, "devspec: %s\n", np->full_name); - - return len; + return 0; } #endif /* CONFIG_PPC_OF */ static struct scsi_host_template k2_sata_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, + ATA_BMDMA_SHT(DRV_NAME), #ifdef CONFIG_PPC_OF - .proc_info = k2_sata_proc_info, + .show_info = k2_sata_show_info, #endif - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations k2_sata_ops = { - .port_disable = ata_port_disable, - .tf_load = k2_sata_tf_load, - .tf_read = k2_sata_tf_read, - .check_status = k2_stat_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, +static struct ata_port_operations k2_sata_ops = { + .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, + .sff_tf_load = k2_sata_tf_load, + .sff_tf_read = k2_sata_tf_read, + .sff_check_status = k2_stat_check_status, + .check_atapi_dma = k2_sata_check_atapi_dma, .bmdma_setup = k2_bmdma_setup_mmio, .bmdma_start = k2_bmdma_start_mmio, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, .scr_read = k2_sata_scr_read, .scr_write = k2_sata_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_pci_host_stop, }; -static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) +static const struct ata_port_info k2_port_info[] = { + /* chip_svw4 */ + { + .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, + /* chip_svw8 */ + { + .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA | + K2_FLAG_SATA_8_PORTS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, + /* chip_svw42 */ + { + .flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, + /* chip_svw43 */ + { + .flags = ATA_FLAG_SATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, +}; + +static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET; port->data_addr = base + K2_SATA_TF_DATA_OFFSET; @@ -353,63 +431,77 @@ static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) } -static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int 
k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - unsigned long base; + const struct ata_port_info *ppi[] = + { &k2_port_info[ent->driver_data], NULL }; + struct ata_host *host; void __iomem *mmio_base; - int pci_dev_busy = 0; - int rc; - int i; + int n_ports, i, rc, bar_pos; + + ata_print_version_once(&pdev->dev, DRV_VERSION); - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + /* allocate host */ + n_ports = 4; + if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS) + n_ports = 8; + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) + return -ENOMEM; + + bar_pos = 5; + if (ppi[0]->flags & K2_FLAG_BAR_POS_3) + bar_pos = 3; /* * If this driver happens to only be useful on Apple's K2, then * we should check that here as it has a normal Serverworks ID */ - rc = pci_enable_device(pdev); + rc = pcim_enable_device(pdev); if (rc) return rc; + /* * Check if we have resources mapped at all (second function may * have been disabled by firmware) */ - if (pci_resource_len(pdev, 5) == 0) + if (pci_resource_len(pdev, bar_pos) == 0) { + /* In IDE mode we need to pin the device to ensure that + pcim_release does not clear the busmaster bit in config + space, as clearing it causes busmaster DMA to fail on + ports 3 & 4 */ + pcim_pin_device(pdev); return -ENODEV; - - /* Request PCI regions */ - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; } - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + /* Request and iomap PCI regions */ + rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); if (rc) - goto err_out_regions; + return rc; + host->iomap = pcim_iomap_table(pdev); + mmio_base = host->iomap[bar_pos]; - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + /* different controllers have different numbers of ports - currently 4 or 8 */ + /* All ports are on the same function. Multi-function device is no + * longer available. This should not be seen in any system. 
*/ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + unsigned int offset = i * K2_SATA_PORT_OFFSET; - memset(probe_ent, 0, sizeof(*probe_ent)); - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); + k2_sata_setup_port(&ap->ioaddr, mmio_base + offset); - mmio_base = pci_iomap(pdev, 5, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; + ata_port_pbar_desc(ap, 5, -1, "mmio"); + ata_port_pbar_desc(ap, 5, offset, "port"); } - base = (unsigned long) mmio_base; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; /* Clear a magic bit in SCR1 according to Darwin, those help * some funky seagate drives (though so far, those were already @@ -422,44 +514,9 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET); writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); - probe_ent->sht = &k2_sata_sht; - probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO; - probe_ent->port_ops = &k2_sata_ops; - probe_ent->n_ports = 4; - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - - /* We don't care much about the PIO/UDMA masks, but the core won't like us - * if we don't fill these - */ - probe_ent->pio_mask = 0x1f; - probe_ent->mwdma_mask = 0x7; - probe_ent->udma_mask = 0x7f; - - /* different controllers have different number of ports - currently 4 or 8 */ - /* All ports are on the same function. Multi-function device is no - * longer available. This should not be seen in any system. */ - for (i = 0; i < ent->driver_data; i++) - k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET); - pci_set_master(pdev); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); - - return 0; - -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &k2_sata_sht); } /* 0x240 is device ID for Apple K2 device @@ -469,15 +526,17 @@ err_out: * controller * */ static const struct pci_device_id k2_sata_pci_tbl[] = { - { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw8 }, + { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, + { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, + { } }; - static struct pci_driver k2_sata_pci_driver = { .name = DRV_NAME, .id_table = k2_sata_pci_tbl, @@ -485,24 +544,10 @@ static struct pci_driver k2_sata_pci_driver = { .remove = ata_pci_remove_one, }; - -static int __init k2_sata_init(void) -{ - return pci_register_driver(&k2_sata_pci_driver); -} - - -static void __exit k2_sata_exit(void) -{ - pci_unregister_driver(&k2_sata_pci_driver); -} - +module_pci_driver(k2_sata_pci_driver); MODULE_AUTHOR("Benjamin Herrenschmidt"); MODULE_DESCRIPTION("low-level driver for K2 SATA controller"); 
MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_init(k2_sata_init); -module_exit(k2_sata_exit); diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 091867e10ea..39b5de60a1f 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1,7 +1,7 @@ /* * sata_sx4.c - Promise SATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -30,26 +30,75 @@ * */ +/* + Theory of operation + ------------------- + + The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy + engine, DIMM memory, and four ATA engines (one per SATA port). + Data is copied to/from DIMM memory by the HDMA engine, before + handing off to one (or more) of the ATA engines. The ATA + engines operate solely on DIMM memory. + + The SX4 behaves like a PATA chip, with no SATA controls or + knowledge whatsoever, leading to the presumption that + PATA<->SATA bridges exist on SX4 boards, external to the + PDC20621 chip itself. + + The chip is quite capable, supporting an XOR engine and linked + hardware commands (permits a string of transactions to be + submitted and waited-on as a single unit), and an optional + microprocessor. + + The limiting factor is largely software. This Linux driver was + written to multiplex the single HDMA engine to copy disk + transactions into a fixed DIMM memory space, from where an ATA + engine takes over. As a result, each WRITE looks like this: + + submit HDMA packet to hardware + hardware copies data from system memory to DIMM + hardware raises interrupt + + submit ATA packet to hardware + hardware executes ATA WRITE command, w/ data in DIMM + hardware raises interrupt + + and each READ looks like this: + + submit ATA packet to hardware + hardware executes ATA READ command, w/ data in DIMM + hardware raises interrupt + + submit HDMA packet to hardware + hardware copies data from DIMM to system memory + hardware raises interrupt + + This is a very slow, lock-step way of doing things that can + certainly be improved by motivated kernel hackers. 
+ + */ + #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> +#include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/sched.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> -#include <asm/io.h> #include "sata_promise.h" #define DRV_NAME "sata_sx4" -#define DRV_VERSION "0.9" +#define DRV_VERSION "0.12" enum { + PDC_MMIO_BAR = 3, + PDC_DIMM_BAR = 4, + PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ @@ -57,6 +106,8 @@ enum { PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */ + PDC_CTLSTAT = 0x60, /* IDEn control / status */ + PDC_20621_SEQCTL = 0x400, PDC_20621_SEQMASK = 0x480, PDC_20621_GENERAL_CTL = 0x484, @@ -86,50 +137,63 @@ enum { board_20621 = 0, /* FastTrak S150 SX4 */ - PDC_RESET = (1 << 11), /* HDMA reset */ + PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */ + PDC_RESET = (1 << 11), /* HDMA/ATA reset */ + PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */ PDC_MAX_HDMA = 32, PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1), - PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, - PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, - PDC_MAX_DIMM_MODULE = 0x02, - PDC_I2C_CONTROL_OFFSET = 0x48, - PDC_I2C_ADDR_DATA_OFFSET = 0x4C, - PDC_DIMM0_CONTROL_OFFSET = 0x80, - PDC_DIMM1_CONTROL_OFFSET = 0x84, - PDC_SDRAM_CONTROL_OFFSET = 0x88, - PDC_I2C_WRITE = 0x00000000, - PDC_I2C_READ = 0x00000040, - PDC_I2C_START = 0x00000080, - PDC_I2C_MASK_INT = 0x00000020, - PDC_I2C_COMPLETE = 0x00010000, - PDC_I2C_NO_ACK = 0x00100000, - PDC_DIMM_SPD_SUBADDRESS_START = 0x00, - PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, - PDC_DIMM_SPD_ROW_NUM = 3, - PDC_DIMM_SPD_COLUMN_NUM = 4, - PDC_DIMM_SPD_MODULE_ROW = 5, - PDC_DIMM_SPD_TYPE = 11, - PDC_DIMM_SPD_FRESH_RATE = 12, - PDC_DIMM_SPD_BANK_NUM = 17, - PDC_DIMM_SPD_CAS_LATENCY = 18, - PDC_DIMM_SPD_ATTRIBUTE = 21, - PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, - PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, - PDC_DIMM_SPD_RAS_CAS_DELAY = 29, - PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, - PDC_DIMM_SPD_SYSTEM_FREQ = 126, - PDC_CTL_STATUS = 0x08, - PDC_DIMM_WINDOW_CTLR = 0x0C, - PDC_TIME_CONTROL = 0x3C, - PDC_TIME_PERIOD = 0x40, - PDC_TIME_COUNTER = 0x44, - PDC_GENERAL_CTLR = 0x484, - PCI_PLL_INIT = 0x8A531824, - PCI_X_TCOUNT = 0xEE1E5CFF + PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, + PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, + PDC_I2C_CONTROL = 0x48, + PDC_I2C_ADDR_DATA = 0x4C, + PDC_DIMM0_CONTROL = 0x80, + PDC_DIMM1_CONTROL = 0x84, + PDC_SDRAM_CONTROL = 0x88, + PDC_I2C_WRITE = 0, /* master -> slave */ + PDC_I2C_READ = (1 << 6), /* master <- slave */ + PDC_I2C_START = (1 << 7), /* start I2C proto */ + PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */ + PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. 
*/ + PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */ + PDC_DIMM_SPD_SUBADDRESS_START = 0x00, + PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, + PDC_DIMM_SPD_ROW_NUM = 3, + PDC_DIMM_SPD_COLUMN_NUM = 4, + PDC_DIMM_SPD_MODULE_ROW = 5, + PDC_DIMM_SPD_TYPE = 11, + PDC_DIMM_SPD_FRESH_RATE = 12, + PDC_DIMM_SPD_BANK_NUM = 17, + PDC_DIMM_SPD_CAS_LATENCY = 18, + PDC_DIMM_SPD_ATTRIBUTE = 21, + PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, + PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, + PDC_DIMM_SPD_RAS_CAS_DELAY = 29, + PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, + PDC_DIMM_SPD_SYSTEM_FREQ = 126, + PDC_CTL_STATUS = 0x08, + PDC_DIMM_WINDOW_CTLR = 0x0C, + PDC_TIME_CONTROL = 0x3C, + PDC_TIME_PERIOD = 0x40, + PDC_TIME_COUNTER = 0x44, + PDC_GENERAL_CTLR = 0x484, + PCI_PLL_INIT = 0x8A531824, + PCI_X_TCOUNT = 0xEE1E5CFF, + + /* PDC_TIME_CONTROL bits */ + PDC_TIMER_BUZZER = (1 << 10), + PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */ + PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */ + PDC_TIMER_ENABLE = (1 << 7), + PDC_TIMER_MASK_INT = (1 << 5), + PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */ + PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE | + PDC_TIMER_ENABLE | + PDC_TIMER_MASK_INT, }; +#define ECC_ERASE_BUF_SZ (128 * 1024) struct pdc_port_priv { u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; @@ -138,8 +202,6 @@ struct pdc_port_priv { }; struct pdc_host_priv { - void __iomem *dimm_mmio; - unsigned int doing_hdma; unsigned int hdma_prod; unsigned int hdma_cons; @@ -151,91 +213,81 @@ struct pdc_host_priv { }; -static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs); -static void pdc_eng_timeout(struct ata_port *ap); -static void pdc_20621_phy_reset (struct ata_port *ap); +static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static void pdc_error_handler(struct ata_port *ap); +static void pdc_freeze(struct ata_port *ap); +static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); -static void pdc_port_stop(struct ata_port *ap); static void pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); -static void pdc20621_host_stop(struct ata_host *host); -static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); -static int pdc20621_detect_dimm(struct ata_probe_ent *pe); -static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, +static unsigned int pdc20621_dimm_init(struct ata_host *host); +static int pdc20621_detect_dimm(struct ata_host *host); +static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata); -static int pdc20621_prog_dimm0(struct ata_probe_ent *pe); -static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe); +static int pdc20621_prog_dimm0(struct ata_host *host); +static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); #ifdef ATA_VERBOSE_DEBUG -static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, +static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); #endif -static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, +static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); static void pdc20621_irq_clear(struct ata_port *ap); -static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); +static unsigned int 
pdc20621_qc_issue(struct ata_queued_cmd *qc); +static int pdc_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); +static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); static struct scsi_host_template pdc_sata_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, + ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations pdc_20621_ops = { - .port_disable = ata_port_disable, - .tf_load = pdc_tf_load_mmio, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = pdc_exec_command_mmio, - .dev_select = ata_std_dev_select, - .phy_reset = pdc_20621_phy_reset, +/* TODO: inherit from base port_ops after converting to new EH */ +static struct ata_port_operations pdc_20621_ops = { + .inherits = &ata_sff_port_ops, + + .check_atapi_dma = pdc_check_atapi_dma, .qc_prep = pdc20621_qc_prep, - .qc_issue = pdc20621_qc_issue_prot, - .data_xfer = ata_mmio_data_xfer, - .eng_timeout = pdc_eng_timeout, - .irq_handler = pdc20621_interrupt, - .irq_clear = pdc20621_irq_clear, + .qc_issue = pdc20621_qc_issue, + + .freeze = pdc_freeze, + .thaw = pdc_thaw, + .softreset = pdc_softreset, + .error_handler = pdc_error_handler, + .lost_interrupt = ATA_OP_NULL, + .post_internal_cmd = pdc_post_internal_cmd, + .port_start = pdc_port_start, - .port_stop = pdc_port_stop, - .host_stop = pdc20621_host_stop, + + .sff_tf_load = pdc_tf_load_mmio, + .sff_exec_command = pdc_exec_command_mmio, + .sff_irq_clear = pdc20621_irq_clear, }; static const struct ata_port_info pdc_port_info[] = { /* board_20621 */ { - .sht = &pdc_sata_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SRST | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI | + ATA_FLAG_PIO_POLLING, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &pdc_20621_ops, }, }; static const struct pci_device_id pdc_sata_pci_tbl[] = { - { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - board_20621 }, + { PCI_VDEVICE(PROMISE, 0x6622), board_20621 }, + { } /* terminate list */ }; - static struct pci_driver pdc_sata_pci_driver = { .name = DRV_NAME, .id_table = pdc_sata_pci_tbl, @@ -244,80 +296,30 @@ static struct pci_driver pdc_sata_pci_driver = { }; -static void pdc20621_host_stop(struct ata_host *host) -{ - struct pci_dev *pdev = to_pci_dev(host->dev); - struct pdc_host_priv *hpriv = host->private_data; - void __iomem *dimm_mmio = hpriv->dimm_mmio; - - pci_iounmap(pdev, dimm_mmio); - kfree(hpriv); - - pci_iounmap(pdev, host->mmio_base); -} - static int pdc_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct pdc_port_priv *pp; - int rc; - rc = ata_port_start(ap); - if (rc) - return rc; + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; - pp = kmalloc(sizeof(*pp), GFP_KERNEL); - if (!pp) { - rc = -ENOMEM; - goto err_out; - } - 
memset(pp, 0, sizeof(*pp)); - - pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); - if (!pp->pkt) { - rc = -ENOMEM; - goto err_out_kfree; - } + pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); + if (!pp->pkt) + return -ENOMEM; ap->private_data = pp; return 0; - -err_out_kfree: - kfree(pp); -err_out: - ata_port_stop(ap); - return rc; } - -static void pdc_port_stop(struct ata_port *ap) -{ - struct device *dev = ap->host->dev; - struct pdc_port_priv *pp = ap->private_data; - - ap->private_data = NULL; - dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma); - kfree(pp); - ata_port_stop(ap); -} - - -static void pdc_20621_phy_reset (struct ata_port *ap) -{ - VPRINTK("ENTER\n"); - ap->cbl = ATA_CBL_SATA; - ata_port_probe(ap); - ata_bus_reset(ap); -} - -static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, - unsigned int portno, - unsigned int total_len) +static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno, + unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_APKT_PRD >> 2; - u32 *buf32 = (u32 *) buf; + __le32 *buf32 = (__le32 *) buf; /* output ATA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + @@ -333,13 +335,12 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, buf32[dw], buf32[dw + 1]); } -static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, - unsigned int portno, - unsigned int total_len) +static inline void pdc20621_host_sg(u8 *buf, unsigned int portno, + unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; - u32 *buf32 = (u32 *) buf; + __le32 *buf32 = (__le32 *) buf; /* output Host DMA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + @@ -360,7 +361,7 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, unsigned int portno) { unsigned int i, dw; - u32 *buf32 = (u32 *) buf; + __le32 *buf32 = (__le32 *) buf; u8 dev_reg; unsigned int dimm_sg = PDC_20621_DIMM_BASE + @@ -412,7 +413,8 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, unsigned int portno) { unsigned int dw; - u32 tmp, *buf32 = (u32 *) buf; + u32 tmp; + __le32 *buf32 = (__le32 *) buf; unsigned int host_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + @@ -453,16 +455,15 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) struct scatterlist *sg; struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->mmio_base; - struct pdc_host_priv *hpriv = ap->host->private_data; - void __iomem *dimm_mmio = hpriv->dimm_mmio; + void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; + void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; unsigned int portno = ap->port_no; - unsigned int i, idx, total_len = 0, sgt_len; - u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; + unsigned int i, si, idx, total_len = 0, sgt_len; + __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); - VPRINTK("ata%u: ENTER\n", ap->id); + VPRINTK("ata%u: ENTER\n", ap->print_id); /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -471,7 +472,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) * Build S/G table */ idx = 0; - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { buf[idx++] = cpu_to_le32(sg_dma_address(sg)); buf[idx++] = cpu_to_le32(sg_dma_len(sg)); total_len += sg_dma_len(sg); @@ -482,10 +483,10 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) /* * Build ATA, host DMA packets */ - 
pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); + pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len); pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); - pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); + pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len); i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); if (qc->tf.flags & ATA_TFLAG_LBA48) @@ -514,13 +515,12 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->mmio_base; - struct pdc_host_priv *hpriv = ap->host->private_data; - void __iomem *dimm_mmio = hpriv->dimm_mmio; + void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; + void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; unsigned int portno = ap->port_no; unsigned int i; - VPRINTK("ata%u: ENTER\n", ap->id); + VPRINTK("ata%u: ENTER\n", ap->print_id); /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -566,7 +566,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, { struct ata_port *ap = qc->ap; struct ata_host *host = ap->host; - void __iomem *mmio = host->mmio_base; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -620,8 +620,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; unsigned int port_no = ap->port_no; - struct pdc_host_priv *hpriv = ap->host->private_data; - void *dimm_mmio = hpriv->dimm_mmio; + void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); dimm_mmio += PDC_DIMM_HOST_PKT; @@ -640,7 +639,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct ata_host *host = ap->host; unsigned int port_no = ap->port_no; - void __iomem *mmio = host->mmio_base; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u8 seq = (u8) (port_no + 1); unsigned int port_ofs; @@ -648,7 +647,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; - VPRINTK("ata%u: ENTER\n", ap->id); + VPRINTK("ata%u: ENTER\n", ap->print_id); wmb(); /* flush PRD, pkt writes */ @@ -669,8 +668,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ writel(port_ofs + PDC_DIMM_ATA_PKT, - (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); VPRINTK("submitted ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_ATA_PKT, port_ofs + PDC_DIMM_ATA_PKT, @@ -678,15 +677,18 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) } } -static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) +static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { - case ATA_PROT_DMA: case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + break; + /*FALLTHROUGH*/ + case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; - case ATA_PROT_ATAPI_DMA: + case ATAPI_PROT_DMA: BUG(); break; @@ -694,11 +696,11 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) break; } - return ata_qc_issue_prot(qc); + return ata_sff_qc_issue(qc); } -static inline unsigned int pdc20621_host_intr( struct ata_port *ap, - struct ata_queued_cmd *qc, +static inline unsigned int 
pdc20621_host_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, unsigned int doing_hdma, void __iomem *mmio) { @@ -715,7 +717,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, /* step two - DMA from DIMM to host */ if (doing_hdma) { - VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id, + VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); @@ -726,7 +728,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, /* step one - exec ATA command */ else { u8 seq = (u8) (port_no + 1 + 4); - VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id, + VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit hdma pkt */ @@ -741,20 +743,20 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, /* step one - DMA from host to DIMM */ if (doing_hdma) { u8 seq = (u8) (port_no + 1); - VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id, + VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit ata pkt */ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); writel(port_ofs + PDC_DIMM_ATA_PKT, - (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); } /* step two - execute ATA command */ else { - VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id, + VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); @@ -766,7 +768,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, /* command completion, but no data xfer */ } else if (qc->tf.protocol == ATA_PROT_NODATA) { - status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); qc->err_mask |= ac_err_mask(status); ata_qc_complete(qc); @@ -781,15 +783,10 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, static void pdc20621_irq_clear(struct ata_port *ap) { - struct ata_host *host = ap->host; - void __iomem *mmio = host->mmio_base; - - mmio += PDC_CHIP0_OFS; - - readl(mmio + PDC_20621_SEQMASK); + ioread8(ap->ioaddr.status_addr); } -static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct ata_port *ap; @@ -800,12 +797,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re VPRINTK("ENTER\n"); - if (!host || !host->mmio_base) { + if (!host || !host->iomap[PDC_MMIO_BAR]) { VPRINTK("QUICK EXIT\n"); return IRQ_NONE; } - mmio_base = host->mmio_base; + mmio_base = host->iomap[PDC_MMIO_BAR]; /* reading should also clear interrupts */ mmio_base += PDC_CHIP0_OFS; @@ -822,9 +819,9 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re return IRQ_NONE; } - spin_lock(&host->lock); + spin_lock(&host->lock); - for (i = 1; i < 9; i++) { + for (i = 1; i < 9; i++) { port_no = i - 1; if (port_no > 3) port_no -= 4; @@ -834,18 +831,17 @@ static irqreturn_t pdc20621_interrupt (int 
irq, void *dev_instance, struct pt_re ap = host->ports[port_no]; tmp = mask & (1 << i); VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); - if (tmp && ap && - !(ap->flags & ATA_FLAG_DISABLED)) { + if (tmp && ap) { struct ata_queued_cmd *qc; - qc = ata_qc_from_tag(ap, ap->active_tag); + qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += pdc20621_host_intr(ap, qc, (i > 4), mmio_base); } } - spin_unlock(&host->lock); + spin_unlock(&host->lock); VPRINTK("mask == 0x%x\n", mask); @@ -854,59 +850,132 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re return IRQ_RETVAL(handled); } -static void pdc_eng_timeout(struct ata_port *ap) +static void pdc_freeze(struct ata_port *ap) { - u8 drv_stat; - struct ata_host *host = ap->host; - struct ata_queued_cmd *qc; - unsigned long flags; + void __iomem *mmio = ap->ioaddr.cmd_addr; + u32 tmp; - DPRINTK("ENTER\n"); + /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */ - spin_lock_irqsave(&host->lock, flags); + tmp = readl(mmio + PDC_CTLSTAT); + tmp |= PDC_MASK_INT; + tmp &= ~PDC_DMA_ENABLE; + writel(tmp, mmio + PDC_CTLSTAT); + readl(mmio + PDC_CTLSTAT); /* flush */ +} - qc = ata_qc_from_tag(ap, ap->active_tag); +static void pdc_thaw(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.cmd_addr; + u32 tmp; - switch (qc->tf.protocol) { - case ATA_PROT_DMA: - case ATA_PROT_NODATA: - ata_port_printk(ap, KERN_ERR, "command timeout\n"); - qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); - break; + /* FIXME: start HDMA engine, if zero ATA engines running */ - default: - drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + /* clear IRQ */ + ioread8(ap->ioaddr.status_addr); - ata_port_printk(ap, KERN_ERR, - "unknown timeout, cmd 0x%x stat 0x%x\n", - qc->tf.command, drv_stat); + /* turn IRQ back on */ + tmp = readl(mmio + PDC_CTLSTAT); + tmp &= ~PDC_MASK_INT; + writel(tmp, mmio + PDC_CTLSTAT); + readl(mmio + PDC_CTLSTAT); /* flush */ +} - qc->err_mask |= ac_err_mask(drv_stat); - break; +static void pdc_reset_port(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; + unsigned int i; + u32 tmp; + + /* FIXME: handle HDMA copy engine */ + + for (i = 11; i > 0; i--) { + tmp = readl(mmio); + if (tmp & PDC_RESET) + break; + + udelay(100); + + tmp |= PDC_RESET; + writel(tmp, mmio); } - spin_unlock_irqrestore(&host->lock, flags); - ata_eh_qc_complete(qc); - DPRINTK("EXIT\n"); + tmp &= ~PDC_RESET; + writel(tmp, mmio); + readl(mmio); /* flush */ +} + +static int pdc_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + pdc_reset_port(link->ap); + return ata_sff_softreset(link, class, deadline); +} + +static void pdc_error_handler(struct ata_port *ap) +{ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + pdc_reset_port(ap); + + ata_sff_error_handler(ap); +} + +static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + pdc_reset_port(ap); +} + +static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) +{ + u8 *scsicmd = qc->scsicmd->cmnd; + int pio = 1; /* atapi dma off by default */ + + /* Whitelist commands that may use DMA. 
*/ + switch (scsicmd[0]) { + case WRITE_12: + case WRITE_10: + case WRITE_6: + case READ_12: + case READ_10: + case READ_6: + case 0xad: /* READ_DVD_STRUCTURE */ + case 0xbe: /* READ_CD */ + pio = 0; + } + /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ + if (scsicmd[0] == WRITE_10) { + unsigned int lba = + (scsicmd[2] << 24) | + (scsicmd[3] << 16) | + (scsicmd[4] << 8) | + scsicmd[5]; + if (lba >= 0xFFFF4FA2) + pio = 1; + } + return pio; } static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { - WARN_ON (tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); - ata_tf_load(ap, tf); + WARN_ON(tf->protocol == ATA_PROT_DMA || + tf->protocol == ATAPI_PROT_DMA); + ata_sff_tf_load(ap, tf); } static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { - WARN_ON (tf->protocol == ATA_PROT_DMA || - tf->protocol == ATA_PROT_NODATA); - ata_exec_command(ap, tf); + WARN_ON(tf->protocol == ATA_PROT_DMA || + tf->protocol == ATAPI_PROT_DMA); + ata_sff_exec_command(ap, tf); } -static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base) +static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base; port->data_addr = base; @@ -925,22 +994,21 @@ static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base) #ifdef ATA_VERBOSE_DEBUG -static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, +static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; - void __iomem *mmio = pe->mmio_base; - struct pdc_host_priv *hpriv = pe->private_data; - void __iomem *dimm_mmio = hpriv->dimm_mmio; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; + void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; - window_size = 0x2000 * 4; /* 32K byte uchar size */ + window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(0x01, mmio + PDC_GENERAL_CTLR); @@ -952,8 +1020,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, idx++; dist = ((long) (window_size - (offset + size))) >= 0 ? 
size : (long) (window_size - offset); - memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), - dist); + memcpy_fromio(psource, dimm_mmio + offset / 4, dist); psource += dist; size -= dist; @@ -962,11 +1029,10 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); - memcpy_fromio((char *) psource, (char *) (dimm_mmio), - window_size / 4); + memcpy_fromio(psource, dimm_mmio, window_size / 4); psource += window_size; size -= window_size; - idx ++; + idx++; } if (size) { @@ -974,29 +1040,27 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); - memcpy_fromio((char *) psource, (char *) (dimm_mmio), - size / 4); + memcpy_fromio(psource, dimm_mmio, size / 4); } } #endif -static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, +static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; - void __iomem *mmio = pe->mmio_base; - struct pdc_host_priv *hpriv = pe->private_data; - void __iomem *dimm_mmio = hpriv->dimm_mmio; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; + void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; - window_size = 0x2000 * 4; /* 32K byte uchar size */ + window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); @@ -1019,7 +1083,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, readl(mmio + PDC_GENERAL_CTLR); psource += window_size; size -= window_size; - idx ++; + idx++; } if (size) { @@ -1032,13 +1096,13 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, } -static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, +static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata) { - void __iomem *mmio = pe->mmio_base; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; u32 i2creg = 0; u32 status; - u32 count =0; + u32 count = 0; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -1047,17 +1111,17 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, i2creg |= subaddr << 16; /* Set the device and subaddress */ - writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET); - readl(mmio + PDC_I2C_ADDR_DATA_OFFSET); + writel(i2creg, mmio + PDC_I2C_ADDR_DATA); + readl(mmio + PDC_I2C_ADDR_DATA); /* Write Control to perform read operation, mask int */ writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, - mmio + PDC_I2C_CONTROL_OFFSET); + mmio + PDC_I2C_CONTROL); for (count = 0; count <= 1000; count ++) { - status = readl(mmio + PDC_I2C_CONTROL_OFFSET); + status = readl(mmio + PDC_I2C_CONTROL); if (status & PDC_I2C_COMPLETE) { - status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET); + status = readl(mmio + PDC_I2C_ADDR_DATA); break; } else if (count == 1000) return 0; @@ -1068,33 +1132,33 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, } -static int pdc20621_detect_dimm(struct ata_probe_ent *pe) +static int pdc20621_detect_dimm(struct ata_host *host) { - u32 data=0 ; - if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + u32 data = 0; + if (pdc20621_i2c_read(host, 
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { - if (data == 100) + if (data == 100) return 100; - } else + } else return 0; - if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { - if(data <= 0x75) + if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { + if (data <= 0x75) return 133; - } else + } else return 0; - return 0; + return 0; } -static int pdc20621_prog_dimm0(struct ata_probe_ent *pe) +static int pdc20621_prog_dimm0(struct ata_host *host) { u32 spd0[50]; u32 data = 0; - int size, i; - u8 bdimmsize; - void __iomem *mmio = pe->mmio_base; + int size, i; + u8 bdimmsize; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; static const struct { unsigned int reg; unsigned int ofs; @@ -1116,53 +1180,53 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe) /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; - for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++) - pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) + pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg, &spd0[pdc_i2c_read_data[i].ofs]); - data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); - data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | + data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); + data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | ((((spd0[27] + 9) / 10) - 1) << 8) ; - data |= (((((spd0[29] > spd0[28]) + data |= (((((spd0[29] > spd0[28]) ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; - data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; + data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; - if (spd0[18] & 0x08) + if (spd0[18] & 0x08) data |= ((0x03) << 14); - else if (spd0[18] & 0x04) + else if (spd0[18] & 0x04) data |= ((0x02) << 14); - else if (spd0[18] & 0x01) + else if (spd0[18] & 0x01) data |= ((0x01) << 14); - else + else data |= (0 << 14); - /* + /* Calculate the size of bDIMMSize (power of 2) and merge the DIMM size by program start/end address. 
*/ - bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; - size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ - data |= (((size / 16) - 1) << 16); - data |= (0 << 23); + bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; + size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ + data |= (((size / 16) - 1) << 16); + data |= (0 << 23); data |= 8; - writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET); - readl(mmio + PDC_DIMM0_CONTROL_OFFSET); - return size; + writel(data, mmio + PDC_DIMM0_CONTROL); + readl(mmio + PDC_DIMM0_CONTROL); + return size; } -static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe) +static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) { u32 data, spd0; - int error, i; - void __iomem *mmio = pe->mmio_base; + int error, i; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ - mmio += PDC_CHIP0_OFS; + mmio += PDC_CHIP0_OFS; - /* + /* Set To Default : DIMM Module Global Control Register (0x022259F1) DIMM Arbitration Disable (bit 20) DIMM Data/Control Output Driving Selection (bit12 - bit15) @@ -1170,51 +1234,50 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe) */ data = 0x022259F1; - writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); - readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + writel(data, mmio + PDC_SDRAM_CONTROL); + readl(mmio + PDC_SDRAM_CONTROL); /* Turn on for ECC */ - pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { data |= (0x01 << 16); - writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); - readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + writel(data, mmio + PDC_SDRAM_CONTROL); + readl(mmio + PDC_SDRAM_CONTROL); printk(KERN_ERR "Local DIMM ECC Enabled\n"); - } + } - /* DIMM Initialization Select/Enable (bit 18/19) */ - data &= (~(1<<18)); - data |= (1<<19); - writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); + /* DIMM Initialization Select/Enable (bit 18/19) */ + data &= (~(1<<18)); + data |= (1<<19); + writel(data, mmio + PDC_SDRAM_CONTROL); - error = 1; - for (i = 1; i <= 10; i++) { /* polling ~5 secs */ - data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + error = 1; + for (i = 1; i <= 10; i++) { /* polling ~5 secs */ + data = readl(mmio + PDC_SDRAM_CONTROL); if (!(data & (1<<19))) { - error = 0; - break; + error = 0; + break; } msleep(i*100); - } - return error; + } + return error; } -static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) +static unsigned int pdc20621_dimm_init(struct ata_host *host) { int speed, size, length; - u32 addr,spd0,pci_status; - u32 tmp=0; - u32 time_period=0; - u32 tcount=0; - u32 ticks=0; - u32 clock=0; - u32 fparam=0; - void __iomem *mmio = pe->mmio_base; + u32 addr, spd0, pci_status; + u32 time_period = 0; + u32 tcount = 0; + u32 ticks = 0; + u32 clock = 0; + u32 fparam = 0; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ - mmio += PDC_CHIP0_OFS; + mmio += PDC_CHIP0_OFS; /* Initialize PLL based upon PCI Bus Frequency */ @@ -1224,7 +1287,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); /* Enable timer */ - writel(0x00001a0, mmio + PDC_TIME_CONTROL); + writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL); readl(mmio + PDC_TIME_CONTROL); /* Wait 3 seconds */ @@ -1242,7 +1305,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) If SX4 is on PCI-X bus, after 3 seconds, the timer counter register should be >= (0xffffffff - 
3x10^8). */ - if(tcount >= PCI_X_TCOUNT) { + if (tcount >= PCI_X_TCOUNT) { ticks = (time_period - tcount); VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); @@ -1270,44 +1333,46 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) Read SPD of DIMM by I2C interface, and program the DIMM Module Controller. */ - if (!(speed = pdc20621_detect_dimm(pe))) { + if (!(speed = pdc20621_detect_dimm(host))) { printk(KERN_ERR "Detect Local DIMM Fail\n"); return 1; /* DIMM error */ - } - VPRINTK("Local DIMM Speed = %d\n", speed); + } + VPRINTK("Local DIMM Speed = %d\n", speed); - /* Programming DIMM0 Module Control Register (index_CID0:80h) */ - size = pdc20621_prog_dimm0(pe); - VPRINTK("Local DIMM Size = %dMB\n",size); + /* Programming DIMM0 Module Control Register (index_CID0:80h) */ + size = pdc20621_prog_dimm0(host); + VPRINTK("Local DIMM Size = %dMB\n", size); - /* Programming DIMM Module Global Control Register (index_CID0:88h) */ - if (pdc20621_prog_dimm_global(pe)) { + /* Programming DIMM Module Global Control Register (index_CID0:88h) */ + if (pdc20621_prog_dimm_global(host)) { printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); return 1; - } + } #ifdef ATA_VERBOSE_DEBUG { - u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', - 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ', - '1','.','1','0', - '9','8','0','3','1','6','1','2',0,0}; + u8 test_parttern1[40] = + {0x55,0xAA,'P','r','o','m','i','s','e',' ', + 'N','o','t',' ','Y','e','t',' ', + 'D','e','f','i','n','e','d',' ', + '1','.','1','0', + '9','8','0','3','1','6','1','2',0,0}; u8 test_parttern2[40] = {0}; - pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x10040, 40); - pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x40, 40); + pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); + pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); - pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40); - pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); + pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); + pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); - pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040, + pdc20621_get_from_dimm(host, test_parttern2, 0x10040, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); - pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40); - pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); + pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); + pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); } @@ -1315,27 +1380,30 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) /* ECC initialization.
*/ - pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { + void *buf; VPRINTK("Start ECC initialization\n"); addr = 0; length = size * 1024 * 1024; + buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); while (addr < length) { - pdc20621_put_to_dimm(pe, (void *) &tmp, addr, - sizeof(u32)); - addr += sizeof(u32); + pdc20621_put_to_dimm(host, buf, addr, + ECC_ERASE_BUF_SZ); + addr += ECC_ERASE_BUF_SZ; } + kfree(buf); VPRINTK("Finish ECC initialization\n"); } return 0; } -static void pdc_20621_init(struct ata_probe_ent *pe) +static void pdc_20621_init(struct ata_host *host) { u32 tmp; - void __iomem *mmio = pe->mmio_base; + void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; @@ -1363,140 +1431,71 @@ static void pdc_20621_init(struct ata_probe_ent *pe) readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ } -static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int pdc_sata_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent = NULL; - unsigned long base; - void __iomem *mmio_base; - void __iomem *dimm_mmio = NULL; - struct pdc_host_priv *hpriv = NULL; - unsigned int board_idx = (unsigned int) ent->driver_data; - int pci_dev_busy = 0; - int rc; + const struct ata_port_info *ppi[] = + { &pdc_port_info[ent->driver_data], NULL }; + struct ata_host *host; + struct pdc_host_priv *hpriv; + int i, rc; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); - if (rc) - return rc; + /* allocate host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + if (!host || !hpriv) + return -ENOMEM; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } + host->private_data = hpriv; - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); if (rc) - goto err_out_regions; - - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); - if (probe_ent == NULL) { - rc = -ENOMEM; - goto err_out_regions; - } + return rc; - memset(probe_ent, 0, sizeof(*probe_ent)); - probe_ent->dev = pci_dev_to_dev(pdev); - INIT_LIST_HEAD(&probe_ent->node); + rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR), + DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); - mmio_base = pci_iomap(pdev, 3, 0); - if (mmio_base == NULL) { - rc = -ENOMEM; - goto err_out_free_ent; - } - base = (unsigned long) mmio_base; + for (i = 0; i < 4; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; + unsigned int offset = 0x200 + i * 0x80; - hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) { - rc = -ENOMEM; - goto err_out_iounmap; - } - memset(hpriv, 0, sizeof(*hpriv)); + pdc_sata_setup_port(&ap->ioaddr, base + offset); - dimm_mmio = pci_iomap(pdev, 4, 0); - if (!dimm_mmio) { - kfree(hpriv); - rc = -ENOMEM; - goto err_out_iounmap; + ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); + ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); } - 
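A note on pdc_check_atapi_dma() from earlier in this file's conversion: it whitelists a handful of ATAPI opcodes for DMA and then carves out one exception, a WRITE_10 whose logical block address lands in the top 45150 sectors of the 32-bit range (0xFFFF4FA2 through 0xFFFFFFFF) is pushed back to PIO. The CDB carries that LBA big-endian in bytes 2-5. A self-contained sketch of the same screening; the helper names are mine, not the driver's:

#include <stdint.h>
#include <stdbool.h>

#define WRITE_10 0x2a	/* SCSI opcode, as in <scsi/scsi.h> */

/* Bytes 2..5 of a 10-byte CDB hold the LBA, big-endian. */
static uint32_t cdb10_lba(const uint8_t *cdb)
{
	return ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	       ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
}

/* Mirror of the driver's rule: WRITE_10 is DMA-capable unless the LBA
 * falls in the chip's problematic top-of-range window. */
static bool write10_must_use_pio(const uint8_t *cdb)
{
	return cdb[0] == WRITE_10 && cdb10_lba(cdb) >= 0xFFFF4FA2u;
}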
hpriv->dimm_mmio = dimm_mmio; - - probe_ent->sht = pdc_port_info[board_idx].sht; - probe_ent->port_flags = pdc_port_info[board_idx].flags; - probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; - probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask; - probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask; - probe_ent->port_ops = pdc_port_info[board_idx].port_ops; - - probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = mmio_base; - - probe_ent->private_data = hpriv; - base += PDC_CHIP0_OFS; + /* configure and activate */ + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; - probe_ent->n_ports = 4; - pdc_sata_setup_port(&probe_ent->port[0], base + 0x200); - pdc_sata_setup_port(&probe_ent->port[1], base + 0x280); - pdc_sata_setup_port(&probe_ent->port[2], base + 0x300); - pdc_sata_setup_port(&probe_ent->port[3], base + 0x380); + if (pdc20621_dimm_init(host)) + return -ENOMEM; + pdc_20621_init(host); pci_set_master(pdev); - - /* initialize adapter */ - /* initialize local dimm */ - if (pdc20621_dimm_init(probe_ent)) { - rc = -ENOMEM; - goto err_out_iounmap_dimm; - } - pdc_20621_init(probe_ent); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); - - return 0; - -err_out_iounmap_dimm: /* only get to this label if 20621 */ - kfree(hpriv); - pci_iounmap(pdev, dimm_mmio); -err_out_iounmap: - pci_iounmap(pdev, mmio_base); -err_out_free_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; -} - - -static int __init pdc_sata_init(void) -{ - return pci_register_driver(&pdc_sata_pci_driver); -} - - -static void __exit pdc_sata_exit(void) -{ - pci_unregister_driver(&pdc_sata_pci_driver); + return ata_host_activate(host, pdev->irq, pdc20621_interrupt, + IRQF_SHARED, &pdc_sata_sht); } +module_pci_driver(pdc_sata_pci_driver); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Promise SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); - -module_init(pdc_sata_init); -module_exit(pdc_sata_exit); diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index dd76f37be18..08f98c3ed5c 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -26,8 +26,8 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -36,7 +36,7 @@ #include <linux/libata.h> #define DRV_NAME "sata_uli" -#define DRV_VERSION "1.0" +#define DRV_VERSION "1.3" enum { uli_5289 = 0, @@ -56,18 +56,18 @@ struct uli_priv { unsigned int scr_cfg_addr[uli_max_ports]; }; -static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static const struct pci_device_id uli_pci_tbl[] = { - { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 }, - { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 }, - { 
PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 }, + { PCI_VDEVICE(AL, 0x5289), uli_5289 }, + { PCI_VDEVICE(AL, 0x5287), uli_5287 }, + { PCI_VDEVICE(AL, 0x5281), uli_5281 }, + { } /* terminate list */ }; - static struct pci_driver uli_pci_driver = { .name = DRV_NAME, .id_table = uli_pci_tbl, @@ -76,61 +76,20 @@ static struct pci_driver uli_pci_driver = { }; static struct scsi_host_template uli_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, - .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations uli_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - +static struct ata_port_operations uli_ops = { + .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .hardreset = ATA_OP_NULL, }; -static struct ata_port_info uli_port_info = { - .sht = &uli_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ +static const struct ata_port_info uli_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, .port_ops = &uli_ops, }; @@ -147,107 +106,119 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); } -static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) +static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); u32 val; pci_read_config_dword(pdev, cfg_addr, &val); return val; } -static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) +static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr); pci_write_config_dword(pdev, cfg_addr, val); } -static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; - return 
uli_scr_cfg_read(ap, sc_reg); + *val = uli_scr_cfg_read(link, sc_reg); + return 0; } -static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { - if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 - return; + if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 + return -EINVAL; - uli_scr_cfg_write(ap, sc_reg, val); + uli_scr_cfg_write(link, sc_reg, val); + return 0; } -static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; - struct ata_probe_ent *probe_ent; - struct ata_port_info *ppi[2]; - int rc; + const struct ata_port_info *ppi[] = { &uli_port_info, NULL }; unsigned int board_idx = (unsigned int) ent->driver_data; - int pci_dev_busy = 0; + struct ata_host *host; struct uli_priv *hpriv; + void __iomem * const *iomap; + struct ata_ioports *ioaddr; + int n_ports, rc; - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); - rc = pci_enable_device(pdev); + rc = pcim_enable_device(pdev); if (rc) return rc; - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) { - pci_dev_busy = 1; - goto err_out; - } + n_ports = 2; + if (board_idx == uli_5287) + n_ports = 4; - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - goto err_out_regions; + /* allocate the host */ + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + if (!host) + return -ENOMEM; - ppi[0] = ppi[1] = &uli_port_info; - probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); - if (!probe_ent) { - rc = -ENOMEM; - goto err_out_regions; - } + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + host->private_data = hpriv; - hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) { - rc = -ENOMEM; - goto err_out_probe_ent; - } + /* the first two ports are standard SFF */ + rc = ata_pci_sff_init_host(host); + if (rc) + return rc; + + ata_pci_bmdma_init(host); - probe_ent->private_data = hpriv; + iomap = host->iomap; switch (board_idx) { case uli_5287: + /* If there are four, the last two live right after + * the standard SFF ports. 
+ */ hpriv->scr_cfg_addr[0] = ULI5287_BASE; hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; - probe_ent->n_ports = 4; - probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8; - probe_ent->port[2].altstatus_addr = - probe_ent->port[2].ctl_addr = - (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4; - probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16; + ioaddr = &host->ports[2]->ioaddr; + ioaddr->cmd_addr = iomap[0] + 8; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = (void __iomem *) + ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4; + ioaddr->bmdma_addr = iomap[4] + 16; hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; - - probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8; - probe_ent->port[3].altstatus_addr = - probe_ent->port[3].ctl_addr = - (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4; - probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24; + ata_sff_std_ports(ioaddr); + + ata_port_desc(host->ports[2], + "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", + (unsigned long long)pci_resource_start(pdev, 0) + 8, + ((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4, + (unsigned long long)pci_resource_start(pdev, 4) + 16); + + ioaddr = &host->ports[3]->ioaddr; + ioaddr->cmd_addr = iomap[2] + 8; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = (void __iomem *) + ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4; + ioaddr->bmdma_addr = iomap[4] + 24; hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; + ata_sff_std_ports(ioaddr); + + ata_port_desc(host->ports[3], + "cmd 0x%llx ctl 0x%llx bmdma 0x%llx", + (unsigned long long)pci_resource_start(pdev, 2) + 9, + ((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4, + (unsigned long long)pci_resource_start(pdev, 4) + 24); - ata_std_ports(&probe_ent->port[2]); - ata_std_ports(&probe_ent->port[3]); break; case uli_5289: @@ -267,34 +238,8 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_intx(pdev, 1); - - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); - - return 0; - -err_out_probe_ent: - kfree(probe_ent); -err_out_regions: - pci_release_regions(pdev); -err_out: - if (!pci_dev_busy) - pci_disable_device(pdev); - return rc; - -} - -static int __init uli_init(void) -{ - return pci_register_driver(&uli_pci_driver); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &uli_sht); } -static void __exit uli_exit(void) -{ - pci_unregister_driver(&uli_pci_driver); -} - - -module_init(uli_init); -module_exit(uli_exit); +module_pci_driver(uli_pci_driver); diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index a72a2389a11..47bf89464ce 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -1,9 +1,9 @@ /* * sata_via.c - VIA Serial ATA controllers * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org - on emails. + * on emails. * * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik @@ -30,56 +30,71 @@ * Hardware documentation available under NDA.
* * - * To-do list: - * - VT6421 PATA support * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <linux/libata.h> -#include <asm/io.h> #define DRV_NAME "sata_via" -#define DRV_VERSION "2.0" +#define DRV_VERSION "2.6" +/* + * vt8251 is different from other sata controllers of VIA. It has two + * channels, each channel has both Master and Slave slot. + */ enum board_ids_enum { vt6420, vt6421, + vt8251, }; enum { SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ SATA_INT_GATE = 0x41, /* SATA interrupt gating */ SATA_NATIVE_MODE = 0x42, /* Native mode enable */ - SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ PORT0 = (1 << 1), PORT1 = (1 << 0), ALL_PORTS = PORT0 | PORT1, - N_PORTS = 2, NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ - SATA_2DEV = (1 << 5), /* SATA is master/slave */ }; -static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); -static void vt6420_error_handler(struct ata_port *ap); +static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); +static void svia_noop_freeze(struct ata_port *ap); +static int vt6420_prereset(struct ata_link *link, unsigned long deadline); +static void vt6420_bmdma_start(struct ata_queued_cmd *qc); +static int vt6421_pata_cable_detect(struct ata_port *ap); +static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); +static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); static const struct pci_device_id svia_pci_tbl[] = { - { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, - { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, - { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 }, + { PCI_VDEVICE(VIA, 0x5337), vt6420 }, + { PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */ + { PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */ + { PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */ + { PCI_VDEVICE(VIA, 0x5372), vt6420 }, + { PCI_VDEVICE(VIA, 0x7372), vt6420 }, + { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */ + { PCI_VDEVICE(VIA, 0x9000), vt8251 }, { } /* terminate list */ }; @@ -88,124 +103,216 @@ static struct pci_driver svia_pci_driver = { .name = DRV_NAME, .id_table = svia_pci_tbl, .probe = svia_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif .remove = ata_pci_remove_one, }; static struct scsi_host_template svia_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .ioctl = ata_scsi_ioctl, - .queuecommand = ata_scsi_queuecmd, - .can_queue = ATA_DEF_QUEUE, 
- .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = LIBATA_MAX_PRD, - .cmd_per_lun = ATA_SHT_CMD_PER_LUN, - .emulated = ATA_SHT_EMULATED, - .use_clustering = ATA_SHT_USE_CLUSTERING, - .proc_name = DRV_NAME, - .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, - .bios_param = ata_std_bios_param, + ATA_BMDMA_SHT(DRV_NAME), }; -static const struct ata_port_operations vt6420_sata_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = vt6420_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, +static struct ata_port_operations svia_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = svia_tf_load, }; -static const struct ata_port_operations vt6421_sata_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = ata_bmdma_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, +static struct ata_port_operations vt6420_sata_ops = { + .inherits = &svia_base_ops, + .freeze = svia_noop_freeze, + .prereset = vt6420_prereset, + .bmdma_start = vt6420_bmdma_start, +}; - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, +static struct ata_port_operations vt6421_pata_ops = { + .inherits = &svia_base_ops, + .cable_detect = vt6421_pata_cable_detect, + .set_piomode = vt6421_set_pio_mode, + .set_dmamode = vt6421_set_dma_mode, +}; +static struct ata_port_operations vt6421_sata_ops = { + .inherits = &svia_base_ops, .scr_read = svia_scr_read, .scr_write = svia_scr_write, +}; - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, +static struct ata_port_operations vt8251_ops = { + .inherits = &svia_base_ops, + .hardreset = sata_std_hardreset, + .scr_read = vt8251_scr_read, + .scr_write = vt8251_scr_write, }; -static struct ata_port_info vt6420_port_info = { - .sht = &svia_sht, - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, - .pio_mask = 0x1f, - .mwdma_mask = 0x07, - .udma_mask = 0x7f, +static const struct ata_port_info vt6420_port_info = { + .flags = ATA_FLAG_SATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, .port_ops = &vt6420_sata_ops, }; +static struct ata_port_info vt6421_sport_info = { + .flags = ATA_FLAG_SATA, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &vt6421_sata_ops, +}; + +static struct ata_port_info vt6421_pport_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + /* No MWDMA */ + .udma_mask = 
ATA_UDMA6, + .port_ops = &vt6421_pata_ops, +}; + +static struct ata_port_info vt8251_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &vt8251_ops, +}; + MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, svia_pci_tbl); MODULE_VERSION(DRV_VERSION); -static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return inl(ap->ioaddr.scr_addr + (4 * sc_reg)); + return -EINVAL; + *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg)); + return 0; } -static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; - outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); + return -EINVAL; + iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg)); + return 0; +} + +static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bit0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bit4 of the configure byte */ + if (raw & (1 << 4)) + v |= 0x02 << 4; + else + v |= 0x01 << 4; + + /* read the IPM field, bit2 and 3 of the config byte */ + v |= ipm_tbl[(raw >> 2) & 0x3]; + break; + + case SCR_ERROR: + /* devices other than 5287 uses 0xA8 as base */ + WARN_ON(pdev->device != 0x5287); + pci_read_config_dword(pdev, 0xB0 + slot * 4, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bit0 and bit1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bit2 and bit3 */ + v |= ((raw >> 2) & 0x03) << 8; + break; + + default: + return -EINVAL; + } + + *val = v; + return 0; +} + +static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + switch (scr) { + case SCR_ERROR: + /* devices other than 5287 uses 0xA8 as base */ + WARN_ON(pdev->device != 0x5287); + pci_write_config_dword(pdev, 0xB0 + slot * 4, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + pci_write_config_byte(pdev, 0xA4 + slot, v); + return 0; + + default: + return -EINVAL; + } +} + +/** + * svia_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This is to fix the internal bug of via chipsets, which will + * reset the device register after changing the IEN bit on ctl + * register. 
+ */ +static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static void svia_noop_freeze(struct ata_port *ap) +{ + /* Some VIA controllers choke if ATA_NIEN is manipulated in + * certain way. Leave it alone and just clear pending IRQ. + */ + ap->ops->sff_check_status(ap); + ata_bmdma_irq_clear(ap); } /** * vt6420_prereset - prereset for vt6420 - * @ap: target ATA port + * @link: target ATA link + * @deadline: deadline jiffies for the operation * * SCR registers on vt6420 are pieces of shit and may hang the * whole machine completely if accessed with the wrong timing. @@ -222,60 +329,92 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) * RETURNS: * 0 on success, -errno otherwise. */ -static int vt6420_prereset(struct ata_port *ap) +static int vt6420_prereset(struct ata_link *link, unsigned long deadline) { - struct ata_eh_context *ehc = &ap->eh_context; + struct ata_port *ap = link->ap; + struct ata_eh_context *ehc = &ap->link.eh_context; unsigned long timeout = jiffies + (HZ * 5); u32 sstatus, scontrol; int online; /* don't do any SCR stuff if we're not loading */ - if (!ATA_PFLAG_LOADING) + if (!(ap->pflags & ATA_PFLAG_LOADING)) goto skip_scr; - /* Resume phy. This is the old resume sequence from - * __sata_phy_reset(). - */ - svia_scr_write(ap, SCR_CONTROL, 0x300); - svia_scr_read(ap, SCR_CONTROL); /* flush */ + /* Resume phy. This is the old SATA resume sequence */ + svia_scr_write(link, SCR_CONTROL, 0x300); + svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */ /* wait for phy to become ready, if necessary */ do { - msleep(200); - if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) + ata_msleep(link->ap, 200); + svia_scr_read(link, SCR_STATUS, &sstatus); + if ((sstatus & 0xf) != 1) break; } while (time_before(jiffies, timeout)); /* open code sata_print_link_status() */ - sstatus = svia_scr_read(ap, SCR_STATUS); - scontrol = svia_scr_read(ap, SCR_CONTROL); + svia_scr_read(link, SCR_STATUS, &sstatus); + svia_scr_read(link, SCR_CONTROL, &scontrol); online = (sstatus & 0xf) == 0x3; - ata_port_printk(ap, KERN_INFO, - "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", - online ? "up" : "down", sstatus, scontrol); + ata_port_info(ap, + "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", + online ? 
"up" : "down", sstatus, scontrol); /* SStatus is read one more time */ - svia_scr_read(ap, SCR_STATUS); + svia_scr_read(link, SCR_STATUS, &sstatus); if (!online) { /* tell EH to bail */ - ehc->i.action &= ~ATA_EH_RESET_MASK; + ehc->i.action &= ~ATA_EH_RESET; return 0; } skip_scr: /* wait for !BSY */ - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + ata_sff_wait_ready(link, deadline); return 0; } -static void vt6420_error_handler(struct ata_port *ap) +static void vt6420_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + if ((qc->tf.command == ATA_CMD_PACKET) && + (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) { + /* Prevents corruption on some ATAPI burners */ + ata_sff_pause(ap); + } + ata_bmdma_start(qc); +} + +static int vt6421_pata_cable_detect(struct ata_port *ap) { - return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, - NULL, ata_std_postreset); + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u8 tmp; + + pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp); + if (tmp & 0x10) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} + +static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 }; + pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno, + pio_bits[adev->pio_mode - XFER_PIO_0]); +} + +static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 }; + pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno, + udma_bits[adev->dma_mode - XFER_UDMA_0]); } static const unsigned int svia_bar_sizes[] = { @@ -286,96 +425,130 @@ static const unsigned int vt6421_bar_sizes[] = { 16, 16, 16, 16, 32, 128 }; -static unsigned long svia_scr_addr(unsigned long addr, unsigned int port) +static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port) { return addr + (port * 128); } -static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port) +static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port) { return addr + (port * 64); } -static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, - struct pci_dev *pdev, - unsigned int port) +static void vt6421_init_addrs(struct ata_port *ap) +{ + void __iomem * const * iomap = ap->host->iomap; + void __iomem *reg_addr = iomap[ap->port_no]; + void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8); + struct ata_ioports *ioaddr = &ap->ioaddr; + + ioaddr->cmd_addr = reg_addr; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = (void __iomem *) + ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS); + ioaddr->bmdma_addr = bmdma_addr; + ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); + + ata_sff_std_ports(ioaddr); + + ata_port_pbar_desc(ap, ap->port_no, -1, "port"); + ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma"); +} + +static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) { - unsigned long reg_addr = pci_resource_start(pdev, port); - unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8); - unsigned long scr_addr; + const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL }; + struct ata_host *host; + int rc; - probe_ent->port[port].cmd_addr = reg_addr; - probe_ent->port[port].altstatus_addr = - probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS; - probe_ent->port[port].bmdma_addr = bmdma_addr; 
 
 static const unsigned int svia_bar_sizes[] = {
@@ -286,96 +425,130 @@ static const unsigned int vt6421_bar_sizes[] = {
 	16, 16, 16, 16, 32, 128
 };
 
-static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
+static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 128);
 }
 
-static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
+static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 64);
 }
 
-static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
-			      struct pci_dev *pdev,
-			      unsigned int port)
+static void vt6421_init_addrs(struct ata_port *ap)
+{
+	void __iomem * const * iomap = ap->host->iomap;
+	void __iomem *reg_addr = iomap[ap->port_no];
+	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ioaddr->cmd_addr = reg_addr;
+	ioaddr->altstatus_addr =
+	ioaddr->ctl_addr = (void __iomem *)
+		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
+	ioaddr->bmdma_addr = bmdma_addr;
+	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
+
+	ata_sff_std_ports(ioaddr);
+
+	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
+	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
+}
+
+static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 {
-	unsigned long reg_addr = pci_resource_start(pdev, port);
-	unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
-	unsigned long scr_addr;
+	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
+	struct ata_host *host;
+	int rc;
 
-	probe_ent->port[port].cmd_addr = reg_addr;
-	probe_ent->port[port].altstatus_addr =
-	probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
-	probe_ent->port[port].bmdma_addr = bmdma_addr;
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	*r_host = host;
+
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
+		return rc;
+	}
 
-	scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
-	probe_ent->port[port].scr_addr = scr_addr;
+	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
+	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);
 
-	ata_std_ports(&probe_ent->port[port]);
+	return 0;
 }
 
-static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
+static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 {
-	struct ata_probe_ent *probe_ent;
-	struct ata_port_info *ppi[2];
-
-	ppi[0] = ppi[1] = &vt6420_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-	if (!probe_ent)
-		return NULL;
-
-	probe_ent->port[0].scr_addr =
-		svia_scr_addr(pci_resource_start(pdev, 5), 0);
-	probe_ent->port[1].scr_addr =
-		svia_scr_addr(pci_resource_start(pdev, 5), 1);
-
-	return probe_ent;
+	const struct ata_port_info *ppi[] =
+		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
+	struct ata_host *host;
+	int i, rc;
+
+	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate host\n");
+		return -ENOMEM;
+	}
+
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
+			rc);
+		return rc;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < host->n_ports; i++)
+		vt6421_init_addrs(host->ports[i]);
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	return 0;
 }
 
-static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
+static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 {
-	struct ata_probe_ent *probe_ent;
-	unsigned int i;
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent)
-		return NULL;
+	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
+	int i, rc;
 
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	*r_host = host;
 
-	probe_ent->sht = &svia_sht;
-	probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
-	probe_ent->port_ops = &vt6421_sata_ops;
-	probe_ent->n_ports = N_PORTS;
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->pio_mask = 0x1f;
-	probe_ent->mwdma_mask = 0x07;
-	probe_ent->udma_mask = 0x7f;
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
+		return rc;
+	}
 
-	for (i = 0; i < N_PORTS; i++)
-		vt6421_init_addrs(probe_ent, pdev, i);
+	/* vt8251 hosts four SATA ports as master/slave of its two channels */
+	for (i = 0; i < host->n_ports; i++)
+		ata_slave_link_init(host->ports[i]);
 
-	return probe_ent;
+	return 0;
 }
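One detail worth calling out in the prepare_host helpers above: the second argument of pcim_iomap_regions() is a bit mask of BAR numbers, so 1 << 5 requests only BAR 5 (the SCR block used by vt6420 and vt8251) while vt6421's 0x3f requests BARs 0 through 5. A small self-contained illustration of that mask arithmetic:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int masks[] = { 1 << 5, 0x3f };	/* vt6420/vt8251 vs vt6421 */

		for (int m = 0; m < 2; m++) {
			printf("mask 0x%02x maps BARs:", masks[m]);
			for (int bar = 0; bar < 6; bar++)
				if (masks[m] & (1u << bar))
					printf(" %d", bar);
			printf("\n");
		}
		return 0;
	}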
0 : tmp8 & 0x0f); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); /* make sure SATA channels are enabled */ pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); if ((tmp8 & ALL_PORTS) != ALL_PORTS) { - dev_printk(KERN_DEBUG, &pdev->dev, - "enabling SATA channels (0x%x)\n", - (int) tmp8); + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); tmp8 |= ALL_PORTS; pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); } @@ -383,9 +556,8 @@ static void svia_configure(struct pci_dev *pdev) /* make sure interrupts for each channel sent to us */ pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); if ((tmp8 & ALL_PORTS) != ALL_PORTS) { - dev_printk(KERN_DEBUG, &pdev->dev, - "enabling SATA channel interrupts (0x%x)\n", - (int) tmp8); + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); tmp8 |= ALL_PORTS; pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); } @@ -393,111 +565,93 @@ static void svia_configure(struct pci_dev *pdev) /* make sure native mode is enabled */ pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { - dev_printk(KERN_DEBUG, &pdev->dev, - "enabling SATA channel native mode (0x%x)\n", - (int) tmp8); + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); tmp8 |= NATIVE_MODE_ALL; pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); } + + /* + * vt6420/1 has problems talking to some drives. The following + * is the fix from Joseph Chan <JosephChan@via.com.tw>. + * + * When host issues HOLD, device may send up to 20DW of data + * before acknowledging it with HOLDA and the host should be + * able to buffer them in FIFO. Unfortunately, some WD drives + * send up to 40DW before acknowledging HOLD and, in the + * default configuration, this ends up overflowing vt6421's + * FIFO, making the controller abort the transaction with + * R_ERR. + * + * Rx52[2] is the internal 128DW FIFO Flow control watermark + * adjusting mechanism enable bit and the default value 0 + * means host will issue HOLD to device when the left FIFO + * size goes below 32DW. Setting it to 1 makes the watermark + * 64DW. 
 
-static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static int printed_version;
 	unsigned int i;
 	int rc;
-	struct ata_probe_ent *probe_ent;
+	struct ata_host *host = NULL;
 	int board_id = (int) ent->driver_data;
-	const int *bar_sizes;
-	int pci_dev_busy = 0;
-	u8 tmp8;
+	const unsigned *bar_sizes;
 
-	if (!printed_version++)
-		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
-	if (board_id == vt6420) {
-		pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
-		if (tmp8 & SATA_2DEV) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "SATA master/slave not supported (0x%x)\n",
-				   (int) tmp8);
-			rc = -EIO;
-			goto err_out_regions;
-		}
-
-		bar_sizes = &svia_bar_sizes[0];
-	} else {
+	if (board_id == vt6421)
 		bar_sizes = &vt6421_bar_sizes[0];
-	}
+	else
+		bar_sizes = &svia_bar_sizes[0];
 
 	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
 		if ((pci_resource_start(pdev, i) == 0) ||
 		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
-			dev_printk(KERN_ERR, &pdev->dev,
+			dev_err(&pdev->dev,
 				"invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
 				i,
-				(unsigned long long)pci_resource_start(pdev, i),
-				(unsigned long long)pci_resource_len(pdev, i));
-			rc = -ENODEV;
-			goto err_out_regions;
+				(unsigned long long)pci_resource_start(pdev, i),
+				(unsigned long long)pci_resource_len(pdev, i));
+			return -ENODEV;
 		}
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-
-	if (board_id == vt6420)
-		probe_ent = vt6420_init_probe_ent(pdev);
-	else
-		probe_ent = vt6421_init_probe_ent(pdev);
-
-	if (!probe_ent) {
-		dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
-		rc = -ENOMEM;
-		goto err_out_regions;
+	switch (board_id) {
+	case vt6420:
+		rc = vt6420_prepare_host(pdev, &host);
+		break;
+	case vt6421:
+		rc = vt6421_prepare_host(pdev, &host);
+		break;
+	case vt8251:
+		rc = vt8251_prepare_host(pdev, &host);
+		break;
+	default:
+		rc = -EINVAL;
 	}
+	if (rc)
+		return rc;
 
-	svia_configure(pdev);
+	svia_configure(pdev, board_id);
 
 	pci_set_master(pdev);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
-}
-
-static int __init svia_init(void)
-{
-	return pci_register_driver(&svia_pci_driver);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &svia_sht);
 }
 
-static void __exit svia_exit(void)
-{
-	pci_unregister_driver(&svia_pci_driver);
-}
-
-module_init(svia_init);
-module_exit(svia_exit);
-
+module_pci_driver(svia_pci_driver);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d0d92f33de5..29e847aac34 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -37,7 +36,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
#include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -47,9 +46,11 @@ #include <linux/libata.h> #define DRV_NAME "sata_vsc" -#define DRV_VERSION "2.0" +#define DRV_VERSION "2.3" enum { + VSC_MMIO_BAR = 0, + /* Interrupt register offsets (from chip base address) */ VSC_SATA_INT_STAT_OFFSET = 0x00, VSC_SATA_INT_MASK_OFFSET = 0x04, @@ -96,25 +97,45 @@ enum { VSC_SATA_INT_PHY_CHANGE), }; - -#define is_vsc_sata_int_err(port_idx, int_status) \ - (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx))) +static int vsc_sata_scr_read(struct ata_link *link, + unsigned int sc_reg, u32 *val) +{ + if (sc_reg > SCR_CONTROL) + return -EINVAL; + *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; +} -static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int vsc_sata_scr_write(struct ata_link *link, + unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) +static void vsc_freeze(struct ata_port *ap) { - if (sc_reg > SCR_CONTROL) - return; - writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); + void __iomem *mask_addr; + + mask_addr = ap->host->iomap[VSC_MMIO_BAR] + + VSC_SATA_INT_MASK_OFFSET + ap->port_no; + + writeb(0, mask_addr); +} + + +static void vsc_thaw(struct ata_port *ap) +{ + void __iomem *mask_addr; + + mask_addr = ap->host->iomap[VSC_MMIO_BAR] + + VSC_SATA_INT_MASK_OFFSET + ap->port_no; + + writeb(0xff, mask_addr); } @@ -123,7 +144,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) void __iomem *mask_addr; u8 mask; - mask_addr = ap->host->mmio_base + + mask_addr = ap->host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_MASK_OFFSET + ap->port_no; mask = readb(mask_addr); if (ctl & ATA_NIEN) @@ -142,18 +163,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) /* * The only thing the ctl register is used for is SRST. * That is not enabled or disabled via tf_load. - * However, if ATA_NIEN is changed, then we need to change the interrupt register. + * However, if ATA_NIEN is changed, then we need to change + * the interrupt register. 
 	 */
 	if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
 		ap->last_ctl = tf->ctl;
 		vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
 	}
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
-		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
-		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
-		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
-		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
+		writew(tf->feature | (((u16)tf->hob_feature) << 8),
+		       ioaddr->feature_addr);
+		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+		       ioaddr->nsect_addr);
+		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+		       ioaddr->lbal_addr);
+		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+		       ioaddr->lbam_addr);
+		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+		       ioaddr->lbah_addr);
 	} else if (is_addr) {
 		writew(tf->feature, ioaddr->feature_addr);
 		writew(tf->nsect, ioaddr->nsect_addr);
@@ -174,7 +201,7 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	u16 nsect, lbal, lbam, lbah, feature;
 
-	tf->command = ata_check_status(ap);
+	tf->command = ata_sff_check_status(ap);
 	tf->device = readw(ioaddr->device_addr);
 	feature = readw(ioaddr->error_addr);
 	nsect = readw(ioaddr->nsect_addr);
@@ -194,124 +221,97 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = lbal >> 8;
 		tf->hob_lbam = lbam >> 8;
 		tf->hob_lbah = lbah >> 8;
-		}
+	}
+}
+
+static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
+{
+	if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
+static void vsc_port_intr(u8 port_status, struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	int handled = 0;
+
+	if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
+		vsc_error_intr(port_status, ap);
+		return;
+	}
+
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
+		handled = ata_bmdma_port_intr(ap, qc);
+
+	/* We received an interrupt during a polled command,
+	 * or some other spurious condition.  Interrupt reporting
+	 * with this hardware is fairly reliable, so it is safe to
+	 * simply clear the interrupt.
+	 */
+	if (unlikely(!handled))
+		ap->ops->sff_check_status(ap);
+}
 
 /*
  * vsc_sata_interrupt
  *
- * Read the interrupt register and process for the devices that have them pending.
+ * Read the interrupt register and process for the devices that have
+ * them pending.
  */
-static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
-				       struct pt_regs *regs)
+static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
 	unsigned int i;
 	unsigned int handled = 0;
-	u32 int_status;
+	u32 status;
 
-	spin_lock(&host->lock);
+	status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
+
+	if (unlikely(status == 0xffffffff || status == 0)) {
+		if (status)
+			dev_err(host->dev,
+				"IRQ status == 0xffffffff, PCI fault or device removal?\n");
+		goto out;
+	}
 
-	int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
+	spin_lock(&host->lock);
 
 	for (i = 0; i < host->n_ports; i++) {
-		if (int_status & ((u32) 0xFF << (8 * i))) {
-			struct ata_port *ap;
-
-			ap = host->ports[i];
-
-			if (is_vsc_sata_int_err(i, int_status)) {
-				u32 err_status;
-				printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
-				err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
-				vsc_sata_scr_write(ap, SCR_ERROR, err_status);
-				handled++;
-			}
-
-			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				struct ata_queued_cmd *qc;
-
-				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-					handled += ata_host_intr(ap, qc);
-				else if (is_vsc_sata_int_err(i, int_status)) {
-					/*
-					 * On some chips (i.e. Intel 31244), an error
-					 * interrupt will sneak in at initialization
-					 * time (phy state changes).  Clearing the SCR
-					 * error register is not required, but it prevents
-					 * the phy state change interrupts from recurring
-					 * later.
-					 */
-					u32 err_status;
-					err_status = vsc_sata_scr_read(ap, SCR_ERROR);
-					printk(KERN_DEBUG "%s: clearing interrupt, "
-					       "status %x; sata err status %x\n",
-					       __FUNCTION__,
-					       int_status, err_status);
-					vsc_sata_scr_write(ap, SCR_ERROR, err_status);
-					/* Clear interrupt status */
-					ata_chk_status(ap);
-					handled++;
-				}
-			}
+		u8 port_status = (status >> (8 * i)) & 0xff;
+		if (port_status) {
+			vsc_port_intr(port_status, host->ports[i]);
+			handled++;
 		}
 	}
 
 	spin_unlock(&host->lock);
-
+out:
 	return IRQ_RETVAL(handled);
 }
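The interrupt rework above hinges on one register layout: the 32-bit interrupt status word carries one byte per port, which vsc_port_intr() then inspects. A self-contained sketch of that decode loop (the sample status value is invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	/* Each port owns one byte of the 32-bit interrupt status word. */
	static uint8_t vsc_port_status(uint32_t status, unsigned int port)
	{
		return (status >> (8 * port)) & 0xff;
	}

	int main(void)
	{
		uint32_t status = 0x00040001;	/* invented sample: ports 0 and 2 pending */

		for (unsigned int i = 0; i < 4; i++) {
			uint8_t ps = vsc_port_status(status, i);
			if (ps)
				printf("port %u: status 0x%02x -> handle\n", i, ps);
		}
		return 0;
	}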
 
 static struct scsi_host_template vsc_sata_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	ATA_BMDMA_SHT(DRV_NAME),
 };
 
-static const struct ata_port_operations vsc_sata_ops = {
-	.port_disable		= ata_port_disable,
-	.tf_load		= vsc_sata_tf_load,
-	.tf_read		= vsc_sata_tf_read,
-	.exec_command		= ata_exec_command,
-	.check_status		= ata_check_status,
-	.dev_select		= ata_std_dev_select,
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
-	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
-	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_handler		= vsc_sata_interrupt,
-	.irq_clear		= ata_bmdma_irq_clear,
+static struct ata_port_operations vsc_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	/*
+	 * The IRQ handling is not quite standard SFF behaviour, so we
+	 * cannot use the default lost interrupt handler.
+	 */
+	.lost_interrupt		= ATA_OP_NULL,
+	.sff_tf_load		= vsc_sata_tf_load,
+	.sff_tf_read		= vsc_sata_tf_read,
+	.freeze			= vsc_freeze,
+	.thaw			= vsc_thaw,
 	.scr_read		= vsc_sata_scr_read,
 	.scr_write		= vsc_sata_scr_write,
-	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
-static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
+static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 {
 	port->cmd_addr		= base + VSC_SATA_TF_CMD_OFFSET;
 	port->data_addr		= base + VSC_SATA_TF_DATA_OFFSET;
@@ -333,90 +333,77 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned lon
 }
 
 
-static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int vsc_sata_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
 {
-	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	unsigned long base;
-	int pci_dev_busy = 0;
+	static const struct ata_port_info pi = {
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &vsc_sata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ata_host *host;
 	void __iomem *mmio_base;
-	int rc;
+	int i, rc;
+	u8 cls;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
-	if (!printed_version++)
-		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+	/* allocate host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+	if (!host)
+		return -ENOMEM;
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	/*
-	 * Check if we have needed resource mapped.
-	 */
-	if (pci_resource_len(pdev, 0) == 0) {
-		rc = -ENODEV;
-		goto err_out;
-	}
+	/* check if we have needed resource mapped */
+	if (pci_resource_len(pdev, 0) == 0)
+		return -ENODEV;
+
+	/* map IO regions and initialize host accordingly */
+	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	mmio_base = host->iomap[VSC_MMIO_BAR];
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
+
+		vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
 	}
 
 	/*
 	 * Use 32 bit DMA mask, because 64 bit address support is poor.
 	 */
-	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, 0, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
+		return rc;
 
 	/*
-	 * Due to a bug in the chip, the default cache line size can't be used
-	 */
-	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
-
-	probe_ent->sht = &vsc_sata_sht;
-	probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				ATA_FLAG_MMIO;
-	probe_ent->port_ops = &vsc_sata_ops;
-	probe_ent->n_ports = 4;
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-
-	/* We don't care much about the PIO/UDMA masks, but the core won't like us
-	 * if we don't fill these
+	 * Due to a bug in the chip, the default cache line size can't be
+	 * used (unless the default is non-zero).
 	 */
-	probe_ent->pio_mask = 0x1f;
-	probe_ent->mwdma_mask = 0x07;
-	probe_ent->udma_mask = 0x7f;
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
+	if (cls == 0x00)
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
 
-	/* We have 4 ports per PCI function */
-	vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
-
-	pci_set_master(pdev);
+	if (pci_enable_msi(pdev) == 0)
+		pci_intx(pdev, 0);
 
 	/*
 	 * Config offset 0x98 is "Extended Control and Status Register 0"
@@ -426,32 +413,20 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
 	 */
 	pci_write_config_dword(pdev, 0x98, 0);
 
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
+				 IRQF_SHARED, &vsc_sata_sht);
 }
 
-
 static const struct pci_device_id vsc_sata_pci_tbl[] = {
 	{ PCI_VENDOR_ID_VITESSE, 0x7174,
 	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
 	{ PCI_VENDOR_ID_INTEL, 0x3200,
 	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+	{ }	/* terminate list */
 };
 
-
 static struct pci_driver vsc_sata_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= vsc_sata_pci_tbl,
@@ -459,24 +434,10 @@ static struct pci_driver vsc_sata_pci_driver = {
 	.remove			= ata_pci_remove_one,
 };
 
-
-static int __init vsc_sata_init(void)
-{
-	return pci_register_driver(&vsc_sata_pci_driver);
-}
-
-
-static void __exit vsc_sata_exit(void)
-{
-	pci_unregister_driver(&vsc_sata_pci_driver);
-}
-
+module_pci_driver(vsc_sata_pci_driver);
 
 MODULE_AUTHOR("Jeremy Higdon");
 MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
-
-module_init(vsc_sata_init);
-module_exit(vsc_sata_exit);
diff --git a/drivers/ata/sis.h b/drivers/ata/sis.h
new file mode 100644
index 00000000000..f7f3eebe666
--- /dev/null
+++ b/drivers/ata/sis.h
@@ -0,0 +1,5 @@
+
+struct ata_port_info;
+
+/* pata_sis.c */
+extern const struct ata_port_info sis_info133_for_sata;
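Both conversions above end the same way: the hand-rolled module init/exit pair is dropped in favour of module_pci_driver(). Conceptually, the macro generates the same boilerplate this diff removes; a simplified sketch of that expansion, using a placeholder driver name (the real macro lives in <linux/pci.h> and is spelled via module_driver()):

	/*
	 * Rough illustration of what module_pci_driver(my_driver)
	 * expands to; "my_driver" is a placeholder struct pci_driver.
	 */
	static int __init my_driver_init(void)
	{
		return pci_register_driver(&my_driver);
	}
	module_init(my_driver_init);

	static void __exit my_driver_exit(void)
	{
		pci_unregister_driver(&my_driver);
	}
	module_exit(my_driver_exit);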
