Diffstat (limited to 'drivers/ata/sata_dwc.c')
-rw-r--r-- | drivers/ata/sata_dwc.c | 2316
1 file changed, 2316 insertions, 0 deletions
diff --git a/drivers/ata/sata_dwc.c b/drivers/ata/sata_dwc.c
new file mode 100644
index 00000000000..8e5405c71a4
--- /dev/null
+++ b/drivers/ata/sata_dwc.c
@@ -0,0 +1,2316 @@

/*
 * drivers/ata/sata_dwc.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *   Copyright 2006 Applied Micro Circuits Corporation
 *   COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifdef CONFIG_SATA_DWC_DEBUG
#define DEBUG
#endif

#ifdef CONFIG_SATA_DWC_VDEBUG
#define VERBOSE_DEBUG
#define DEBUG_NCQ
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/libata.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.0"

/* SATA DMA driver globals */
#if defined(CONFIG_APM82181)
#define DMA_NUM_CHANS		2
#else
#define DMA_NUM_CHANS		1
#endif

#define DMA_NUM_CHAN_REGS	8

/* SATA DMA register definitions */
#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

struct dmareg {
	u32 low;	/* Low bits 0-31 */
	u32 high;	/* High bits 32-63 */
};

/* DMA per-channel registers */
struct dma_chan_regs {
	struct dmareg sar;	/* Source Address */
	struct dmareg dar;	/* Destination Address */
	struct dmareg llp;	/* Linked List Pointer */
	struct dmareg ctl;	/* Control */
	struct dmareg sstat;	/* Source Status (not implemented in core) */
	struct dmareg dstat;	/* Destination Status (not implemented in core) */
	struct dmareg sstatar;	/* Source Status Address (not impl in core) */
	struct dmareg dstatar;	/* Destination Status Address (not implemented) */
	struct dmareg cfg;	/* Config */
	struct dmareg sgr;	/* Source Gather */
	struct dmareg dsr;	/* Destination Scatter */
};

/* Generic interrupt registers */
struct dma_interrupt_regs {
	struct dmareg tfr;	/* Transfer Interrupt */
	struct dmareg block;	/* Block Interrupt */
	struct dmareg srctran;	/* Source Transfer Interrupt */
	struct dmareg dsttran;	/* Dest Transfer Interrupt */
	struct dmareg error;	/* Error Interrupt */
};

struct ahb_dma_regs {
	struct dma_chan_regs	chan_regs[DMA_NUM_CHAN_REGS];
	struct dma_interrupt_regs interrupt_raw;	/* Raw Interrupt */
	struct dma_interrupt_regs interrupt_status;	/* Interrupt Status */
	struct dma_interrupt_regs interrupt_mask;	/* Interrupt Mask */
	struct dma_interrupt_regs interrupt_clear;	/* Interrupt Clear */
	struct dmareg		statusInt;	/* Combined Interrupt */
	struct dmareg		rq_srcreg;	/* Src Trans Req */
	struct dmareg		rq_dstreg;	/* Dst Trans Req */
	struct dmareg		rq_sgl_srcreg;	/* Sngl Src Trans Req */
	struct dmareg		rq_sgl_dstreg;	/* Sngl Dst Trans Req */
	struct dmareg		rq_lst_srcreg;	/* Last Src Trans Req */
	struct dmareg		rq_lst_dstreg;	/* Last Dst Trans Req */
	struct dmareg		dma_cfg;	/* DMA Config */
	struct dmareg		dma_chan_en;	/* DMA Channel Enable */
	struct dmareg		dma_id;		/* DMA ID */
	struct dmareg		dma_test;	/* DMA Test */
	struct dmareg		res1;		/* reserved */
	struct dmareg		res2;		/* reserved */
	/*
	 * DMA Comp Params:
	 * Param 6 = dma_param[0], Param 5 = dma_param[1],
	 * Param 4 = dma_param[2] ...
	 */
	struct dmareg		dma_params[6];
};
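/*
 * Layout note (illustrative, not in the original source): with 8-byte
 * dmareg entries, each dma_chan_regs slot is 11 * 8 = 0x58 bytes, so
 * channel n's registers start at offset n * 0x58 from the DMA base and
 * the interrupt_raw block lands at 8 * 0x58 = 0x2c0 -- matching the
 * DesignWare AHB DMAC register map this struct mirrors.
 */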
/* Data structure for a linked list item (hardware descriptor) */
struct lli {
	u32 sar;		/* Source Address */
	u32 dar;		/* Destination Address */
	u32 llp;		/* Linked List Pointer */
	struct dmareg ctl;	/* Control */
#if defined(CONFIG_APM82181)
	u32 dstat;		/* Source status is not supported */
#else
	struct dmareg dstat;	/* Destination Status */
#endif
};

#define SATA_DWC_DMAC_LLI_SZ	(sizeof(struct lli))
#define SATA_DWC_DMAC_LLI_NUM	256
#define SATA_DWC_DMAC_TWIDTH_BYTES	4
#define SATA_DWC_DMAC_LLI_TBL_SZ \
	(SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM)
#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
	(0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)

/* DMA register operation bits */
#define DMA_EN			0x00000001	/* Enable AHB DMA */
#define DMA_CHANNEL(ch)		(0x00000001 << (ch))	/* Select channel */
#define DMA_ENABLE_CHAN(ch)	((0x00000001 << (ch)) | \
				 ((0x00000001 << (ch)) << 8))
#define DMA_DISABLE_CHAN(ch)	(0x00000000 | ((0x00000001 << (ch)) << 8))

/* Channel Control Register */
#define DMA_CTL_BLK_TS(size)	((size) & 0x00000FFF)	/* Block transfer size */
#define DMA_CTL_LLP_SRCEN	0x10000000	/* Block chain enable Src */
#define DMA_CTL_LLP_DSTEN	0x08000000	/* Block chain enable Dst */
/*
 * This define is used to set block chaining disabled in the control low
 * register. It is already in little-endian format so it can be &'d directly.
 * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
 */
#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7
#define DMA_CTL_SMS(num)	(((num) & 0x3) << 25)	/* Src Master Select */
#define DMA_CTL_DMS(num)	(((num) & 0x3) << 23)	/* Dst Master Select */
#define DMA_CTL_TTFC(type)	(((type) & 0x7) << 20)	/* Type & Flow Control */
#define DMA_CTL_TTFC_P2M_DMAC	0x00000002	/* Per->Mem, DMAC flow ctlr */
#define DMA_CTL_TTFC_M2P_PER	0x00000003	/* Mem->Per, peripheral flow ctlr */
#define DMA_CTL_SRC_MSIZE(size)	(((size) & 0x7) << 14)	/* Src Burst Length */
#define DMA_CTL_DST_MSIZE(size)	(((size) & 0x7) << 11)	/* Dst Burst Length */
#define DMA_CTL_SINC_INC	0x00000000	/* Src address increment */
#define DMA_CTL_SINC_DEC	0x00000200
#define DMA_CTL_SINC_NOCHANGE	0x00000400
#define DMA_CTL_DINC_INC	0x00000000	/* Dst address increment */
#define DMA_CTL_DINC_DEC	0x00000080
#define DMA_CTL_DINC_NOCHANGE	0x00000100
#define DMA_CTL_SRC_TRWID(size)	(((size) & 0x7) << 4)	/* Src Transfer Width */
#define DMA_CTL_DST_TRWID(size)	(((size) & 0x7) << 1)	/* Dst Transfer Width */
#define DMA_CTL_INT_EN		0x00000001	/* Interrupt Enable */

/* Channel Configuration Register high bits */
#define DMA_CFG_FCMOD_REQ	0x00000001	/* Flow control: request based */
#define DMA_CFG_PROTCTL		(0x00000003 << 2)	/* Protection control */

/* Channel Configuration Register low bits */
#define DMA_CFG_RELD_DST	0x80000000	/* Reload Dst/Src Addr */
#define DMA_CFG_RELD_SRC	0x40000000
#define DMA_CFG_HS_SELSRC	0x00000800	/* SW handshake Src/Dst */
#define DMA_CFG_HS_SELDST	0x00000400
#define DMA_CFG_FIFOEMPTY	(0x00000001 << 9)	/* FIFO Empty bit */

/* Assign hardware handshaking interface (x) to dst / src peripheral */
#define DMA_CFG_HW_HS_DEST(int_num)	(((int_num) & 0xF) << 11)
#define DMA_CFG_HW_HS_SRC(int_num)	(((int_num) & 0xF) << 7)
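/*
 * Worked example (illustrative, not in the original source): the channel
 * enable register pairs each channel bit with a write-enable bit 8
 * positions up, so DMA_ENABLE_CHAN(1) = (1 << 1) | (1 << 9) = 0x0202,
 * while DMA_DISABLE_CHAN(1) = 0x0200 sets only the write-enable bit and
 * clears the channel bit. Likewise DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC)
 * places 0x2 in bits [22:20], i.e. 0x00200000 in the control low word.
 */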
/* Channel Linked List Pointer Register */
#define DMA_LLP_LMS(addr, master)	(((addr) & 0xfffffffc) | (master))
#define DMA_LLP_AHBMASTER1	0	/* List Master Select */
#define DMA_LLP_AHBMASTER2	1

#define SATA_DWC_MAX_PORTS	1

#define SATA_DWC_SCR_OFFSET	0x24
#define SATA_DWC_REG_OFFSET	0x64

/* DWC SATA registers */
struct sata_dwc_regs {
	u32 fptagr;	/* 1st party DMA tag */
	u32 fpbor;	/* 1st party DMA buffer offset */
	u32 fptcr;	/* 1st party DMA Xfr count */
	u32 dmacr;	/* DMA Control */
	u32 dbtsr;	/* DMA Burst Transaction size */
	u32 intpr;	/* Interrupt Pending */
	u32 intmr;	/* Interrupt Mask */
	u32 errmr;	/* Error Mask */
	u32 llcr;	/* Link Layer Control */
	u32 phycr;	/* PHY Control */
	u32 physr;	/* PHY Status */
	u32 rxbistpd;	/* Recvd BIST pattern def register */
	u32 rxbistpd1;	/* Recvd BIST data dword1 */
	u32 rxbistpd2;	/* Recvd BIST pattern data dword2 */
	u32 txbistpd;	/* Trans BIST pattern def register */
	u32 txbistpd1;	/* Trans BIST data dword1 */
	u32 txbistpd2;	/* Trans BIST data dword2 */
	u32 bistcr;	/* BIST Control Register */
	u32 bistfctr;	/* BIST FIS Count Register */
	u32 bistsr;	/* BIST Status Register */
	u32 bistdecr;	/* BIST Dword Error count register */
	u32 res[15];	/* Reserved locations */
	u32 testr;	/* Test Register */
	u32 versionr;	/* Version Register */
	u32 idr;	/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];	/* FIFO locations in DMA Mode */
};

#define SCR_SCONTROL_DET_ENABLE	0x00000001
#define SCR_SSTATUS_DET_PRESENT	0x00000001
#define SCR_SERROR_DIAG_X	0x04000000

/* DWC SATA register operations */
#define SATA_DWC_TXFIFO_DEPTH	0x01FF
#define SATA_DWC_RXFIFO_DEPTH	0x01FF

#define SATA_DWC_DMACR_TMOD_TXCHEN	0x00000004
#define SATA_DWC_DMACR_TXCHEN	(0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RXCHEN	(0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) | \
					 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) | \
					 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_TXRXCH_CLEAR	SATA_DWC_DMACR_TMOD_TXCHEN

#define SATA_DWC_DBTSR_MWR(size)	(((size) / 4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size) / 4) & \
					  SATA_DWC_RXFIFO_DEPTH) << 16)

#define SATA_DWC_INTPR_DMAT	0x00000001
#define SATA_DWC_INTPR_NEWFP	0x00000002
#define SATA_DWC_INTPR_PMABRT	0x00000004
#define SATA_DWC_INTPR_ERR	0x00000008
#define SATA_DWC_INTPR_NEWBIST	0x00000010
#define SATA_DWC_INTPR_IPF	0x10000000
#define SATA_DWC_INTMR_DMATM	0x00000001
#define SATA_DWC_INTMR_NEWFPM	0x00000002
#define SATA_DWC_INTMR_PMABRTM	0x00000004
#define SATA_DWC_INTMR_ERRM	0x00000008
#define SATA_DWC_INTMR_NEWBISTM	0x00000010
#define SATA_DWC_LLCR_SCRAMEN	0x00000001
#define SATA_DWC_LLCR_DESCRAMEN	0x00000002
#define SATA_DWC_LLCR_RPDEN	0x00000004
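/*
 * Worked example (illustrative, not in the original source): the
 * SATA_DWC_DBTSR_* macros above convert a burst size in bytes to DWORDs
 * and place the write and read burst sizes in the low and high halves of
 * the DBTSR register. For the default 64-byte burst,
 * SATA_DWC_DBTSR_MWR(64) = (64 / 4) & 0x1ff = 0x10 and
 * SATA_DWC_DBTSR_MRD(64) = 0x10 << 16 = 0x00100000.
 */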
/* All error bits; zeros are reserved fields. */
#define SATA_DWC_SERROR_ERR_BITS	0x0FFF0F03

#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)

struct sata_dwc_device {
	struct resource reg;		/* Register resource */
	struct device *dev;		/* generic device struct */
	struct ata_probe_ent *pe;	/* ptr to probe-ent */
	struct ata_host *host;
	u8 *reg_base;
	struct sata_dwc_regs *sata_dwc_regs;	/* DW Synopsys SATA specific */
	u8 *scr_base;
	int dma_channel;		/* DWC SATA DMA channel */
	int irq_dma;
};

#define SATA_DWC_QCMD_MAX	32

struct sata_dwc_device_port {
	struct sata_dwc_device *hsdev;
	int cmd_issued[SATA_DWC_QCMD_MAX];
	struct lli *llit[SATA_DWC_QCMD_MAX];
	dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
	u32 dma_chan[SATA_DWC_QCMD_MAX];
	int dma_pending[SATA_DWC_QCMD_MAX];
	u32 sata_dwc_sactive_issued;	/* issued queued ops */
	u32 sata_dwc_sactive_queued;	/* queued ops */
	u32 dma_interrupt_count;
};

static struct sata_dwc_device *dwc_dev_list[2];
static int dma_intr_registered;

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *) \
				 (host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *) \
				 (ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *) \
				 (ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *) \
				 (qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *) \
				 (p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PENDING	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Globals
 *
 * Note: the queued-op bookkeeping (sata_dwc_sactive_issued,
 * sata_dwc_sactive_queued, dma_interrupt_count) was moved into
 * struct sata_dwc_device_port above to make it private to each port.
 */
static struct ahb_dma_regs *sata_dma_regs;

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
static void sata_dwc_port_stop(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
static void dma_dwc_exit(struct sata_dwc_device *hsdev);
static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
			      struct lli *lli, dma_addr_t dma_lli,
			      void __iomem *addr);
static void dma_dwc_xfer_start(int dma_ch);
static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch);

static const char *dir_2_txt(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return "bi";
	case DMA_FROM_DEVICE:
		return "from";
	case DMA_TO_DEVICE:
		return "to";
	case DMA_NONE:
		return "none";
	default:
		return "err";
	}
}

static const char *prot_2_txt(enum ata_tf_protocols protocol)
{
	switch (protocol) {
	case ATA_PROT_UNKNOWN:
		return "unknown";
	case ATA_PROT_NODATA:
		return "nodata";
	case ATA_PROT_PIO:
		return "pio";
	case ATA_PROT_DMA:
		return "dma";
	case ATA_PROT_NCQ:
		return "ncq";
	case ATAPI_PROT_PIO:
		return "atapi pio";
	case ATAPI_PROT_NODATA:
		return "atapi nodata";
	case ATAPI_PROT_DMA:
		return "atapi dma";
	default:
		return "err";
	}
}
static inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf)
{
	switch (tf->command) {
	case ATA_CMD_CHK_POWER:
		return "ATA_CMD_CHK_POWER";
	case ATA_CMD_EDD:
		return "ATA_CMD_EDD";
	case ATA_CMD_FLUSH:
		return "ATA_CMD_FLUSH";
	case ATA_CMD_FLUSH_EXT:
		return "ATA_CMD_FLUSH_EXT";
	case ATA_CMD_ID_ATA:
		return "ATA_CMD_ID_ATA";
	case ATA_CMD_ID_ATAPI:
		return "ATA_CMD_ID_ATAPI";
	case ATA_CMD_FPDMA_READ:
		return "ATA_CMD_FPDMA_READ";
	case ATA_CMD_FPDMA_WRITE:
		return "ATA_CMD_FPDMA_WRITE";
	case ATA_CMD_READ:
		return "ATA_CMD_READ";
	case ATA_CMD_READ_EXT:
		return "ATA_CMD_READ_EXT";
	case ATA_CMD_WRITE:
		return "ATA_CMD_WRITE";
	case ATA_CMD_WRITE_EXT:
		return "ATA_CMD_WRITE_EXT";
	case ATA_CMD_PIO_READ:
		return "ATA_CMD_PIO_READ";
	case ATA_CMD_PIO_READ_EXT:
		return "ATA_CMD_PIO_READ_EXT";
	case ATA_CMD_PIO_WRITE:
		return "ATA_CMD_PIO_WRITE";
	case ATA_CMD_PIO_WRITE_EXT:
		return "ATA_CMD_PIO_WRITE_EXT";
	case ATA_CMD_SET_FEATURES:
		return "ATA_CMD_SET_FEATURES";
	case ATA_CMD_PACKET:
		return "ATA_CMD_PACKET";
	default:
		return "ATA_CMD_???";
	}
}

static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf)
{
	dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx "
		 "device: %x\n", tf->command, prot_2_txt(tf->protocol),
		 tf->flags, tf->device);
	dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: "
		 "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
		 tf->lbam, tf->lbah);
	dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x "
		 "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature,
		 tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, tf->hob_lbah);
}

/*
 * Function: get_burst_length_encode
 * arguments: datalength: length in bytes of data
 * returns value to be programmed in register corresponding to data length
 * This value is effectively the log(base 2) of the length
 */
static inline int get_burst_length_encode(int datalength)
{
	int items = datalength >> 2;	/* div by 4 to get lword count */

	if (items >= 64)
		return 5;
	if (items >= 32)
		return 4;
	if (items >= 16)
		return 3;
	if (items >= 8)
		return 2;
	if (items >= 4)
		return 1;
	return 0;
}

static inline void clear_chan_interrupts(int c)
{
	out_le32(&sata_dma_regs->interrupt_clear.tfr.low, DMA_CHANNEL(c));
	out_le32(&sata_dma_regs->interrupt_clear.block.low, DMA_CHANNEL(c));
	out_le32(&sata_dma_regs->interrupt_clear.srctran.low, DMA_CHANNEL(c));
	out_le32(&sata_dma_regs->interrupt_clear.dsttran.low, DMA_CHANNEL(c));
	out_le32(&sata_dma_regs->interrupt_clear.error.low, DMA_CHANNEL(c));
}

/*
 * Function: dma_request_channel
 * arguments: ap: the ata_port requesting a channel
 * returns the port's DMA channel number if it is idle, else -1
 * Each port owns a fixed DMA channel; hand it out only when the channel
 * is not currently enabled in the DMA engine.
 */
static int dma_request_channel(struct ata_port *ap)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	if (!(in_le32(&sata_dma_regs->dma_chan_en.low) &
	      DMA_CHANNEL(hsdev->dma_channel))) {
#if defined(VERBOSE_DEBUG)
		dev_notice(ap->dev, "%s Successfully requested DMA channel %d\n",
			   __func__, hsdev->dma_channel);
#endif
		return hsdev->dma_channel;
	}

	return -1;
}
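/*
 * Worked example (illustrative, not in the original source): for the
 * default burst AHB_DMA_BRST_DFLT = 64 bytes, get_burst_length_encode()
 * above sees 64 >> 2 = 16 data items and returns 3. Fed through
 * DMA_CTL_SRC_MSIZE()/DMA_CTL_DST_MSIZE(), encoding 3 selects a 16-item
 * burst in the channel control register (the encodings 0..5 correspond
 * to bursts of 1, 4, 8, 16, 32 and 64 items).
 */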
/*
 * Function: dma_dwc_interrupt
 * arguments: irq, hsdev_instance
 * returns IRQ_HANDLED
 * Interrupt handler for the DW AHB SATA DMA
 */
static int dma_dwc_interrupt(int irq, void *hsdev_instance)
{
	int chan;
	volatile u32 tfr_reg, err_reg;
	unsigned long flags;
	struct sata_dwc_device *hsdev =
		(struct sata_dwc_device *)hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

#if defined(VERBOSE_DEBUG)
	dev_notice(ap->dev, "%s: DMA interrupt in channel %d\n", __func__,
		   hsdevp->hsdev->dma_channel);
#endif
	tfr_reg = in_le32(&sata_dma_regs->interrupt_status.tfr.low);
	err_reg = in_le32(&sata_dma_regs->interrupt_status.error.low);

	dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
		tfr_reg, err_reg, hsdevp->dma_pending[tag], port);

	/* Only this hsdev's DMA channel is of interest here. */
	chan = hsdev->dma_channel;

	if (tfr_reg & DMA_CHANNEL(chan)) {
		/*
		 * Each DMA command produces 2 interrupts. Only
		 * complete the command after both interrupts have been
		 * seen. (See sata_dwc_isr())
		 */
		hsdevp->dma_interrupt_count++;

		sata_dwc_clear_dmacr(hsdevp, tag);

		if (unlikely(hsdevp->dma_pending[tag] ==
			     SATA_DWC_DMA_PENDING_NONE)) {
			dev_err(ap->dev, "DMA not pending eot=0x%08x "
				"err=0x%08x tag=0x%02x pending=%d\n",
				tfr_reg, err_reg, tag,
				hsdevp->dma_pending[tag]);
		}

		if ((hsdevp->dma_interrupt_count % 2) == 0)
			sata_dwc_dma_xfer_complete(ap, 1);

		/* Clear the interrupt */
		out_le32(&sata_dma_regs->interrupt_clear.tfr.low,
			 DMA_CHANNEL(chan));
	}

	/*
	 * Check for an error interrupt. Errors are not expected here and
	 * there is no recovery yet, only a diagnostic.
	 */
	if (unlikely(err_reg & DMA_CHANNEL(chan))) {
		/* TODO: needs a real error handler */
		dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", err_reg);

		/* Clear the interrupt. */
		out_le32(&sata_dma_regs->interrupt_clear.error.low,
			 DMA_CHANNEL(chan));
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_HANDLED;
}
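/*
 * Completion accounting, sketched (illustrative, not in the original
 * source): each command is expected to raise one end-of-transfer
 * interrupt here and one DMA-related interrupt in sata_dwc_isr(), each
 * incrementing dma_interrupt_count. The count going even is therefore
 * the signal that both halves have been seen:
 *
 *	issue:      dma_interrupt_count == 0
 *	1st intr:   count -> 1 (odd, keep waiting)
 *	2nd intr:   count -> 2 (even, complete the qc)
 */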
static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance)
{
	volatile u32 tfr_reg, err_reg;
	int chan;

	tfr_reg = in_le32(&sata_dma_regs->interrupt_status.tfr.low);
	err_reg = in_le32(&sata_dma_regs->interrupt_status.error.low);

	for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
		/* Check for end-of-transfer interrupt. */
		if (tfr_reg & DMA_CHANNEL(chan))
			dma_dwc_interrupt(0, dwc_dev_list[chan]);

		/* Check for error interrupt. */
		if (err_reg & DMA_CHANNEL(chan))
			dma_dwc_interrupt(0, dwc_dev_list[chan]);
	}

	return IRQ_HANDLED;
}

/*
 * Function: dma_request_interrupts
 * arguments: hsdev, irq
 * returns status
 * This function unmasks the per-channel DMA interrupts and registers the
 * shared ISR for the DMA engine (once, for all channels).
 */
static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
{
	int retval = 0;
	int chan;

	for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
		/* Unmask error interrupt */
		out_le32(&sata_dma_regs->interrupt_mask.error.low,
			 in_le32(&sata_dma_regs->interrupt_mask.error.low) |
			 DMA_ENABLE_CHAN(chan));

		/* Unmask end-of-transfer interrupt */
		out_le32(&sata_dma_regs->interrupt_mask.tfr.low,
			 in_le32(&sata_dma_regs->interrupt_mask.tfr.low) |
			 DMA_ENABLE_CHAN(chan));
	}

	/*
	 * FIXME: the two SATA controllers share the same DMA engine, so
	 * currently they also share the same DMA interrupt.
	 */
	if (!dma_intr_registered) {
		retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED,
				     "SATA DMA", NULL);
		if (retval) {
			dev_err(hsdev->dev, "%s: could not get IRQ %d\n",
				__func__, irq);
			return -ENODEV;
		}
		dma_intr_registered = 1;
	}

	/* Mark this interrupt as requested */
	hsdev->irq_dma = irq;

	return 0;
}
/*
 * Function: map_sg_to_lli
 * arguments: qc: the queued command
 *            lli: the LLI table (virtual address)
 *            dma_lli: the LLI table (physical/DMA address)
 *            dmadr_addr: the SATA core's DMA FIFO address
 * returns the number of elements written to the DMA linked list
 * This function creates a list of LLIs for a DMA transfer from the
 * command's scatter/gather list.
 *
 * Note that the Synopsys driver has a comment proposing that better
 * performance is possible by only enabling interrupts on the last item in
 * the linked list. However, that could be a problem if an error happened
 * on one of the first items: the transfer would halt, but no error
 * interrupt would occur.
 *
 * Currently this function therefore sets interrupts enabled for each
 * linked list item: DMA_CTL_INT_EN.
 */
static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli,
			 dma_addr_t dma_lli, void __iomem *dmadr_addr)
{
	struct scatterlist *sg = qc->sg;
	struct device *dwc_dev = qc->ap->dev;
	int num_elems = qc->n_elem;
	int dir = qc->dma_dir;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap);
	int i, idx = 0;
	int fis_len = 0;
	dma_addr_t next_llp;
	int bl;
	unsigned int dma_ts = 0;

	dev_dbg(dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x "
		"dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
		(u32)dmadr_addr);

	bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);

	for (i = 0; i < num_elems; i++, sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		dev_dbg(dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len=%d\n",
			__func__, i, addr, sg_len);

		while (sg_len) {
			if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) {
				/* The LLI table is not large enough. */
				dev_err(dwc_dev, "LLI table overrun (idx=%d)\n",
					idx);
				break;
			}
			len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
				SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;

			offset = addr & 0xffff;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/*
			 * Make sure an LLI block is not created that will
			 * span an 8K max FIS boundary. If the block spans
			 * such a FIS boundary, there is a chance that a DMA
			 * burst will cross that boundary -- this results in
			 * an error in the host controller.
			 */
			if (unlikely(fis_len + len > 8192)) {
				dev_dbg(dwc_dev, "SPLITTING: fis_len=%d(0x%x) "
					"len=%d(0x%x)\n", fis_len, fis_len,
					len, len);
				len = 8192 - fis_len;
				fis_len = 0;
			} else {
				fis_len += len;
			}
			if (fis_len == 8192)
				fis_len = 0;

			/*
			 * Set DMA addresses and lower half of control
			 * register based on direction.
			 */
#if defined(VERBOSE_DEBUG)
			dev_notice(qc->ap->dev, "%s: sg_len = %d, len = %d\n",
				   __func__, sg_len, len);
#endif

#if defined(CONFIG_APM82181)
			if (dir == DMA_FROM_DEVICE) {
				lli[idx].dar = cpu_to_le32(addr);
				lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
				if (hsdevp->hsdev->dma_channel == 0) {
					/* DMA channel 0 */
					lli[idx].ctl.low = cpu_to_le32(
						DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
						DMA_CTL_SMS(1) | /* Source: Master 2 */
						DMA_CTL_DMS(0) | /* Dest: Master 1 */
						DMA_CTL_SRC_MSIZE(bl) |
						DMA_CTL_DST_MSIZE(bl) |
						DMA_CTL_SINC_NOCHANGE |
						DMA_CTL_SRC_TRWID(2) |
						DMA_CTL_DST_TRWID(2) |
						DMA_CTL_INT_EN |
						DMA_CTL_LLP_SRCEN |
						DMA_CTL_LLP_DSTEN);
				} else if (hsdevp->hsdev->dma_channel == 1) {
					/* DMA channel 1 */
					lli[idx].ctl.low = cpu_to_le32(
						DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
						DMA_CTL_SMS(2) | /* Source: Master 3 */
						DMA_CTL_DMS(0) | /* Dest: Master 1 */
						DMA_CTL_SRC_MSIZE(bl) |
						DMA_CTL_DST_MSIZE(bl) |
						DMA_CTL_SINC_NOCHANGE |
						DMA_CTL_SRC_TRWID(2) |
						DMA_CTL_DST_TRWID(2) |
						DMA_CTL_INT_EN |
						DMA_CTL_LLP_SRCEN |
						DMA_CTL_LLP_DSTEN);
				}
			} else {	/* DMA_TO_DEVICE */
				lli[idx].sar = cpu_to_le32(addr);
				lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
				if (hsdevp->hsdev->dma_channel == 0) {
					/* DMA channel 0 */
					lli[idx].ctl.low = cpu_to_le32(
						DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
						DMA_CTL_SMS(0) |
						DMA_CTL_DMS(1) |
						DMA_CTL_SRC_MSIZE(bl) |
						DMA_CTL_DST_MSIZE(bl) |
						DMA_CTL_DINC_NOCHANGE |
						DMA_CTL_SRC_TRWID(2) |
						DMA_CTL_DST_TRWID(2) |
						DMA_CTL_INT_EN |
						DMA_CTL_LLP_SRCEN |
						DMA_CTL_LLP_DSTEN);
				} else if (hsdevp->hsdev->dma_channel == 1) {
					/* DMA channel 1 */
					lli[idx].ctl.low = cpu_to_le32(
						DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
						DMA_CTL_SMS(0) |
						DMA_CTL_DMS(2) |
						DMA_CTL_SRC_MSIZE(bl) |
						DMA_CTL_DST_MSIZE(bl) |
						DMA_CTL_DINC_NOCHANGE |
						DMA_CTL_SRC_TRWID(2) |
						DMA_CTL_DST_TRWID(2) |
						DMA_CTL_INT_EN |
						DMA_CTL_LLP_SRCEN |
						DMA_CTL_LLP_DSTEN);
				}
			}
#else
			if (dir == DMA_FROM_DEVICE) {
				lli[idx].dar = cpu_to_le32(addr);
				lli[idx].sar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
					DMA_CTL_SMS(0) |
					DMA_CTL_DMS(1) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_SINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			} else {	/* DMA_TO_DEVICE */
				lli[idx].sar = cpu_to_le32(addr);
				lli[idx].dar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
					DMA_CTL_SMS(1) |
					DMA_CTL_DMS(0) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_DINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			}
#endif
			dev_dbg(dwc_dev, "%s setting ctl.high len: 0x%08x val: "
				"0x%08x\n", __func__, len,
				DMA_CTL_BLK_TS(len / 4));

			/* Program the LLI CTL high register */
			dma_ts = DMA_CTL_BLK_TS(len / 4);
			lli[idx].ctl.high = cpu_to_le32(dma_ts);

			/*
			 * Program the next pointer. The next pointer must be
			 * the physical address, not the virtual address.
			 */
			next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));

			/* The last 2 bits encode the list master select. */
#if defined(CONFIG_APM82181)
			next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1);
#else
			next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
#endif

			lli[idx].llp = cpu_to_le32(next_llp);

#if defined(VERBOSE_DEBUG)
			dev_notice(qc->ap->dev, "%s: index %d\n", __func__, idx);
			dev_notice(qc->ap->dev, "%s setting ctl.high with val: 0x%08x\n",
				   __func__, lli[idx].ctl.high);
			dev_notice(qc->ap->dev, "%s setting ctl.low with val: 0x%08x\n",
				   __func__, lli[idx].ctl.low);
			dev_notice(qc->ap->dev, "%s setting lli.dar with val: 0x%08x\n",
				   __func__, lli[idx].dar);
			dev_notice(qc->ap->dev, "%s setting lli.sar with val: 0x%08x\n",
				   __func__, lli[idx].sar);
			dev_notice(qc->ap->dev, "%s setting next_llp with val: 0x%08x\n",
				   __func__, lli[idx].llp);
#endif

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/*
	 * The last next ptr has to be zero and the last control low register
	 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
	 * and destination enable) set back to 0 (disabled). This is what
	 * tells the core that this is the last item in the linked list.
	 */
	if (likely(idx)) {
		lli[idx-1].llp = 0x00000000;
		lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;

		/* Flush cache to memory */
		dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
			       DMA_BIDIRECTIONAL);
	}

#if defined(VERBOSE_DEBUG)
	dev_notice(qc->ap->dev, "%s: Final index %d\n", __func__, idx-1);
	dev_notice(qc->ap->dev, "%s setting ctl.high with val: 0x%08x\n",
		   __func__, lli[idx-1].ctl.high);
	dev_notice(qc->ap->dev, "%s setting ctl.low with val: 0x%08x\n",
		   __func__, lli[idx-1].ctl.low);
	dev_notice(qc->ap->dev, "%s setting lli.dar with val: 0x%08x\n",
		   __func__, lli[idx-1].dar);
	dev_notice(qc->ap->dev, "%s setting lli.sar with val: 0x%08x\n",
		   __func__, lli[idx-1].sar);
	dev_notice(qc->ap->dev, "%s setting next_llp with val: 0x%08x\n",
		   __func__, lli[idx-1].llp);
#endif

	return idx;
}

/*
 * Function: dma_dwc_xfer_start
 * arguments: channel number
 * Return: None
 * Enables the DMA channel
 */
static void dma_dwc_xfer_start(int dma_ch)
{
	/* Enable the DMA channel */
	out_le32(&sata_dma_regs->dma_chan_en.low,
		 in_le32(&sata_dma_regs->dma_chan_en.low) |
		 DMA_ENABLE_CHAN(dma_ch));
#if defined(VERBOSE_DEBUG)
	printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n",
	       __func__, in_le32(&sata_dma_regs->dma_chan_en.low));
#endif
}

static int dma_dwc_channel_enabled(int ch)
{
	if (in_le32(&sata_dma_regs->dma_chan_en.low) & DMA_CHANNEL(ch))
		return 1;

	return 0;
}

static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch)
{
	int enabled = dma_dwc_channel_enabled(dma_ch);

	dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n",
		 __func__, dma_ch, enabled);

	if (enabled) {
		out_le32(&sata_dma_regs->dma_chan_en.low,
			 in_le32(&sata_dma_regs->dma_chan_en.low) |
			 DMA_DISABLE_CHAN(dma_ch));

		do {
			enabled = dma_dwc_channel_enabled(dma_ch);
		} while (enabled);
	}
}
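/*
 * Worked example (illustrative, not in the original source): ctl.high
 * holds the block transfer size in transfer-width units. With the 4-byte
 * transfer width, the largest block this driver creates is
 * SATA_DWC_DMAC_CTRL_TSIZE_MAX = 0x800 * 4 = 8192 bytes, for which
 * DMA_CTL_BLK_TS(8192 / 4) = 0x800 -- exactly the 8K FIS boundary that
 * map_sg_to_lli() above is careful never to straddle.
 */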
static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
			      struct lli *lli, dma_addr_t dma_lli,
			      void __iomem *addr)
{
	int dma_ch;
	int num_lli;

	/* Acquire DMA channel */
	dma_ch = dma_request_channel(qc->ap);
	if (unlikely(dma_ch == -1)) {
		dev_err(qc->ap->dev, "%s: dma channel unavailable\n",
			__func__);
		return -EAGAIN;
	}
#if defined(VERBOSE_DEBUG)
	dev_notice(qc->ap->dev, "%s: Got channel %d\n", __func__, dma_ch);
#endif
	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	num_lli = map_sg_to_lli(qc, lli, dma_lli, addr);

	dev_dbg(qc->ap->dev, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%08x "
		"addr: %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli,
		(u32)dma_lli, addr, num_lli);

	/* Clear channel interrupts */
	clear_chan_interrupts(dma_ch);

	/* Program the CFG register. */
#if defined(CONFIG_APM82181)
	if (dma_ch == 0) {
		/* Buffer mode enabled, FIFO_MODE = 0 */
		out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.high,
			 0x0000000d);
		/* Channel 0 - bits [7:5] */
		out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.low,
			 0x00000000);
	} else if (dma_ch == 1) {
		/* Buffer mode enabled, FIFO_MODE = 0 */
		out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.high,
			 0x0000088d);
		/* Channel 1 - bits [7:5] */
		out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.low,
			 0x00000020);
	}
#else
	out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.high,
		 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
	out_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.low, 0);
#endif
	/* Program the address of the linked list */
#if defined(CONFIG_APM82181)
	out_le32(&sata_dma_regs->chan_regs[dma_ch].llp.low,
		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1));
#else
	out_le32(&sata_dma_regs->chan_regs[dma_ch].llp.low,
		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
#endif

	/* Program the CTL register with src/dst chain enable (= 0x18000000) */
	out_le32(&sata_dma_regs->chan_regs[dma_ch].ctl.low,
		 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);

#if defined(VERBOSE_DEBUG)
	dev_notice(qc->ap->dev, "%s DMA channel %d is ready\n", __func__,
		   dma_ch);
	dev_notice(qc->ap->dev, "%s setting cfg.high of channel %d with val: 0x%08x\n",
		   __func__, dma_ch,
		   in_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.high));
	dev_notice(qc->ap->dev, "%s setting cfg.low of channel %d with val: 0x%08x\n",
		   __func__, dma_ch,
		   in_le32(&sata_dma_regs->chan_regs[dma_ch].cfg.low));
	dev_notice(qc->ap->dev, "%s setting llp.low of channel %d with val: 0x%08x\n",
		   __func__, dma_ch,
		   in_le32(&sata_dma_regs->chan_regs[dma_ch].llp.low));
	dev_notice(qc->ap->dev, "%s setting ctl.low of channel %d with val: 0x%08x\n",
		   __func__, dma_ch,
		   in_le32(&sata_dma_regs->chan_regs[dma_ch].ctl.low));
#endif

	return dma_ch;
}
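/*
 * Usage sketch (illustrative, not in the original source): the expected
 * call order from the command-issue path, using the per-tag LLI table
 * allocated at port start. The variable names are assumptions based on
 * this file's conventions:
 *
 *	dma_ch = dma_dwc_xfer_setup(qc, hsdevp->llit[tag],
 *				    hsdevp->llit_dma[tag],
 *				    (void __iomem *)&hsdev->sata_dwc_regs->dmadr);
 *	if (dma_ch >= 0)
 *		dma_dwc_xfer_start(dma_ch);
 *
 * dma_dwc_xfer_setup() programs CFG, LLP and CTL but leaves the channel
 * disabled; dma_dwc_xfer_start() then sets the channel enable bit to
 * begin the transfer.
 */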